query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Delete a draft belonging to a particular user. | def do_delete_draft(draft_id: int, user_profile: UserProfile) -> None:
try:
draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)
except Draft.DoesNotExist:
raise ResourceNotFoundError(_("Draft does not exist"))
draft_id = draft_object.id
draft_object.delete()
event = {"type": "drafts", "op": "remove", "draft_id": draft_id}
send_event(user_profile.realm, event, [user_profile.id]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_draft(draft_uuid):\n api_request('delete', api_url('drafts', str(draft_uuid)))",
"def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })",
"def delete(self, user_id):\r\n return delete_user(request, user_id)",
"def delete(self, user_id):\n return delete_user(user_id)",
"def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return HttpTextResponse('OK')",
"def delete_user(self, user):\n self.delete(user)",
"def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)",
"def delete_drafts(request):\n query = models.Comment.query(\n models.Comment.author == request.user, models.Comment.draft == True,\n ancestor=request.issue.key)\n keys = query.fetch(keys_only=True)\n ndb.delete_multi(keys)\n request.issue.calculate_draft_count_by_user()\n request.issue.put()\n return HttpResponseRedirect(\n reverse(publish, args=[request.issue.key.id()]))",
"def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )",
"def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()",
"def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()",
"def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])",
"def delete_user():",
"def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status",
"def DeleteDraft(host, change):\n path = _GetChangePath(change)\n try:\n FetchUrl(host, path, reqtype='DELETE', ignore_204=True, ignore_404=False)\n except GOBError as e:\n # On success, gerrit returns status 204; anything else is an error.\n if e.http_status != 204:\n raise\n else:\n raise GOBError(\n 200, 'Unexpectedly received a 200 http status while deleting draft %r'\n % change)",
"def delete_user(self, user_id):\n sql = 'update account_user set is_deleted = 1 where id = %s'\n with connection.cursor() as cursor:\n cursor.execute(sql, [user_id])\n row = cursor.fetchone()\n\n return row",
"def delete_item(self, id: str, user: User) -> bool:",
"def delete_user():\n #TODO user delete\n pass",
"def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))",
"def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }",
"def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return",
"def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))",
"async def delete_user(user_id):\n \n user = User.select().where(User.id == user_id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n user.delete_instance()\n\n return f\"User {user.username} deleted successfully\"",
"def delete_state(self, user=None):\r\n if user:\r\n self.q(css='input[id^=sd_fu_]').fill(user)\r\n self.q(css='section.staff-modal a#staff-debug-sdelete').click()",
"def get_delete(self, user_id):\n return self.post_delete(user_id)",
"def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))",
"def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', status=401)",
"def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)"
] | [
"0.7564674",
"0.7383004",
"0.6840054",
"0.6774973",
"0.67743796",
"0.66971105",
"0.6603117",
"0.65684706",
"0.654473",
"0.6492794",
"0.6456428",
"0.63981634",
"0.6362671",
"0.63221097",
"0.62822104",
"0.62783",
"0.62724733",
"0.62385744",
"0.61420053",
"0.61398315",
"0.6122251",
"0.6109359",
"0.6092476",
"0.6091207",
"0.60840106",
"0.6053058",
"0.60407203",
"0.60404986",
"0.60355985",
"0.60286033"
] | 0.77435535 | 0 |
Get zero version `0.0.0` | def zero(cls: Type[_R]) -> _R:
return cls("0.0.0") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def version_number() -> int:\n return 0",
"def get_version():\n return '%d.%d.%d' % version_info",
"def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"",
"def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver",
"def get_version(self):\n return 0",
"def _get_version(self):",
"def get_version():\n return 1",
"def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])",
"def test_parse_version(self):\n version = VersionNumberScaleMeasurement.parse_version(None)\n self.assertEqual(Version(\"0\"), version)",
"def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"",
"def get_version():\n\n version_string = version_from_versioneer()\n\n if not version_string:\n version_string = version_from_pip()\n\n return version_string",
"def get_version():\n click.echo(get_current_version_number())",
"def get_version_number():\n return [0, 1, 0]",
"def delete_closing_zero(model_version: str) -> str:\r\n if model_version[-2:] == \".0\":\r\n return model_version[:-2]\r\n return model_version",
"def test_get_version(self):\n pass",
"def get_version():\n return about.get_version()",
"def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''",
"def default_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_version_number\")",
"def version(self) -> str:\n return '0.1'",
"def Version(self) -> _n_0_t_12:",
"def Version(self) -> _n_0_t_12:",
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def get_version():\n return '.'.join(map(str, VERSION))",
"def do_version(self):\n return \"1.0.0\", True",
"def version():\n\n pass",
"def getDefaultVersion():\n return _libsbml.FbcExtension_getDefaultVersion()"
] | [
"0.7245187",
"0.668971",
"0.66882396",
"0.6665648",
"0.6570469",
"0.64901686",
"0.64345765",
"0.64268774",
"0.63672644",
"0.6360634",
"0.63367814",
"0.6328667",
"0.6325577",
"0.63126534",
"0.6253386",
"0.6247568",
"0.62471354",
"0.6241517",
"0.6227065",
"0.6226674",
"0.6218984",
"0.6200833",
"0.61821896",
"0.617639",
"0.617639",
"0.61438334",
"0.61386836",
"0.61378485",
"0.61342424",
"0.6113383"
] | 0.68623227 | 1 |
Get next micro version. | def bump_micro(self: _R, inc: int = 1) -> _R:
if not self.is_stable:
return self.get_stable().bump_micro(inc - 1)
return self._replace(
BaseVersion(
epoch=0,
release=(self.major, self.minor, self.micro + inc),
pre=None,
post=None,
dev=None,
local=None,
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v",
"def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)",
"def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)",
"def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s",
"def micros() -> int:",
"def micro_Version(self):\n return tuple(map(ord, self._serial_io(b'\\x56', 2)[0:2]))",
"def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))",
"def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)",
"def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)",
"def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))",
"def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)",
"def microversion(self, microversion):\n\n self._microversion = microversion",
"def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string",
"def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)",
"def get_stable(self: _R) -> _R:\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )",
"def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")",
"def test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)",
"def get_version(self):\r\n return self._arm.get_version()",
"def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"",
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None",
"def get_version():\n click.echo(get_current_version_number())",
"def get_version_by_number(version_manager, version_number, request):\n return version_manager.versions[version_number - 1]",
"def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)",
"def get_version():\n return 1",
"def get_version():\n return '%d.%d.%d' % version_info",
"def get_uni_version(self):\n version, major_version = None, None\n response = self.get_resource(category=VERSION, no_version=True)\n if response and response.get('version'):\n version = response['version']\n version_list = version.split('.')\n major_version = version_list[0][1:] + version_list[1]\n return version, major_version",
"def get_latest_version(model: str) -> str:\n if model in {\"small\", \"medium\", \"large\"}:\n model = f\"da_dacy_{model}_trf\"\n versions = [mdl.split(\"-\")[-1] for mdl in models_url if mdl.startswith(model)]\n versions = sorted(\n versions,\n key=lambda s: [int(u) for u in s.split(\".\")],\n reverse=True,\n )\n return versions[0]"
] | [
"0.6825203",
"0.6565859",
"0.6408749",
"0.6236738",
"0.6216092",
"0.60519856",
"0.60511297",
"0.6045051",
"0.5854707",
"0.5835003",
"0.58286273",
"0.577659",
"0.57726276",
"0.5756602",
"0.5756459",
"0.5703252",
"0.56749344",
"0.5653698",
"0.5628476",
"0.5624355",
"0.5614559",
"0.5603638",
"0.55932724",
"0.5578742",
"0.5536794",
"0.55328876",
"0.5523858",
"0.5514526",
"0.5513861",
"0.5492878"
] | 0.6671826 | 1 |
Get stable version from pre or post release. | def get_stable(self: _R) -> _R:
return self._replace(
BaseVersion(
epoch=0,
release=(self.major, self.minor, self.micro),
pre=None,
post=None,
dev=None,
local=None,
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v",
"def get_version():\n return \"0.0.1 (prerelease prototype)\"",
"def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"",
"def is_release():\n return VERSION[-1]",
"def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")",
"def stable():\n env.branch = 'stable'",
"def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])",
"def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)",
"def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)",
"def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version",
"def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()",
"def get_version():\n git_root = find_git_root(dirname(__file__))\n\n if git_root is not None:\n # Get the version using \"git describe\".\n cmd = \"git describe --tags --match [0-9]*\".split()\n try:\n version = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get version number from git tags\")\n exit(1)\n\n # PEP 386 compatibility\n if \"-\" in version:\n version = \".post\".join(version.split(\"-\")[:2])\n\n # Don't declare a version \"dirty\" merely because a time stamp has\n # changed. If it is dirty, append a \".dev1\" suffix to indicate a\n # development revision after the release.\n with open(os.devnull, \"w\") as fd_devnull:\n subprocess.call([\"git\", \"status\"], stdout=fd_devnull, stderr=fd_devnull)\n\n cmd = \"git diff-index --name-only HEAD\".split()\n try:\n dirty = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get git index status\")\n exit(1)\n\n if dirty != \"\":\n version += \".dev1\"\n\n return version\n\n else:\n try:\n return pkg_resources.working_set.by_key[\"graphql-validate\"].version\n except KeyError:\n return \"0.0.0-unreleased\"",
"def get_current_release():\n return _CURRENT_RELEASE",
"def select_release():\n release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)\n if release_version is None:\n release_version = os_utils.os_release('keystone')\n unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)\n return release_version",
"def is_stable(self) -> bool:\n return not self.is_prerelease",
"def get_increased_version():\n logs = get_rolling_log_history()\n\n if has_breaking_changes(logs):\n return get_increased_base_version(0)\n if has_features(logs):\n return get_increased_base_version(1)\n if has_fixes(logs):\n return get_increased_base_version(2)",
"def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())",
"def get_base_version():\n if BASE_VERSION is None:\n return shell_output('git describe --tags --abbrev=0')\n return BASE_VERSION",
"def get_version():\n return 1",
"def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version",
"def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None",
"def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date",
"def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)",
"def get_git_version(abbrev=4):\n # Read in the version that's currently in RELEASE-VERSION.\n release_version = read_release_version()\n\n # First try to get the current version using \"git describe\".\n tag, count, _ = call_git_describe(abbrev)\n\n if count == '0':\n if tag:\n # Normal tagged release\n version = tag\n else:\n # This is an odd case where the git repo/branch can't find a tag\n version = \"0.dev0\"\n elif count:\n # Non-zero count means a development release after the last tag\n version = \"{}.dev{}\".format(tag, count)\n else:\n # Build count wasn't returned at all. Fall back on the value that's in\n # the packaged RELEASE-VERSION file\n version = release_version\n\n # If the current version is different from what's in the\n # RELEASE-VERSION file, update the file to be current.\n if version != release_version:\n write_release_version(version)\n\n # Finally, return the current version.\n return version",
"def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string",
"def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")",
"def get_distrib_version():\n distrib, version, codename = _get_release_infos() \n return version",
"def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200",
"def get_version():\n click.echo(get_current_version_number())",
"def test_beta_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_2"
] | [
"0.65761906",
"0.64690495",
"0.64539593",
"0.6419955",
"0.6369132",
"0.6336212",
"0.62359613",
"0.62115157",
"0.61312795",
"0.612242",
"0.6118793",
"0.61041874",
"0.6091029",
"0.6033105",
"0.59679747",
"0.59453046",
"0.5935324",
"0.5899036",
"0.58957833",
"0.58908105",
"0.588986",
"0.5887754",
"0.58852804",
"0.58735377",
"0.58613676",
"0.5860372",
"0.58529496",
"0.5849485",
"0.5845098",
"0.5843697"
] | 0.6867934 | 0 |
helper function for checkPass returns the first element of charList found that works for the password at index i if it fails to find a character at i, prints i and returns an empty string instead of returning i. | def findChar(username, url, charList, i):
for ch in charList:
if(checkPasswordCharacter(ch, username, url, index = i)):
return ch
#only runs if no ch in charList match:
# return i #oof, there's no match if i is out of bounds, e.g. len(password) < i
print("Missing: " + i) #so I know when it's not a match
return "" #return an empty string instead
# Note to self: should not return an _ because it'll match an _ if wildCards are true (default).
# If wildCards is false, this will just skip characters that don't match anything! | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkPass(username, url, charList, n):\n # dikt = {}\n password = \"\"\n for i in range(0, n):\n if(testPassword(password, username, url)):\n return password #password is found! \n # https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python\n ch = findChar(username, url, charList, i)\n # if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string \n # use try except instead of if(isinstance(ch, int)):\n # https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not\n try: \n password += ch\n except TypeError:\n # print(i)\n password += str(ch) #should be blank\n # raise ValueError(\"index i has no matching character\")\n return password #only reached if password is too long for the given n",
"def valid_password(lower, upper, letter, password):\n # Note the -1 to turn 1 indexing into 0 indexing\n matches = [idx for idx in (lower, upper) if password[idx - 1] == letter]\n return len(matches) == 1",
"def check_pass(password):\n # big_chain : length of longest chain of repeated symbols\n # c_start : index at which big_chain starts\n big_chain = 0\n cur_loc = 0\n for symb in password:\n if big_chain == 0:\n l_symb = symb\n cur_chain = 1\n big_chain = 1\n c_start = 0\n cur_c = cur_loc\n cur_loc += 1\n continue\n if symb == l_symb:\n cur_chain += 1\n else:\n cur_chain = 1\n cur_c = cur_loc\n if cur_chain > big_chain:\n big_chain = cur_chain\n c_start = cur_c\n cur_loc += 1\n l_symb = symb\n\n # return or repeat, need big_chain, c_start\n if big_chain < 2:\n return False\n if big_chain == 2:\n return True\n return (check_pass(password[:c_start])\n or check_pass(password[c_start+big_chain:]))",
"def solve_part_two(self):\n password = list(\"XXXXXXXX\")\n index = 0\n counter = 0\n while counter < 8:\n (s, found_index) = self.find_next_hash(index)\n index = found_index + 1\n offset = ord(s[5]) - ord(\"0\")\n # Offset invalid or password character already set previously?\n if offset >= 8 or password[offset] != \"X\":\n continue\n password[offset] = s[6]\n counter += 1\n return \"\".join(password)",
"def pass_check(user_found):\n password = ''\n while password != user_found[1]:\n password = stdiomask.getpass(prompt=\"Please enter your password: \", mask='*')\n pass1 = encrypter.encrypt_password(password)\n if user_found[1] == pass1:\n return \"\\nPassword match\\n\"\n else:\n print(\"\\nPassword do not match\\n\")",
"def find_pass(pass_list, service):\r\n for pass_info in pass_list:\r\n if pass_info[1] == service:\r\n return pass_info[2]",
"def SecondPart():\n return passwordChecker(data)",
"def solve_part_one(self):\n password = \"\"\n index = 0\n while len(password) < 8:\n (s, found_index) = self.find_next_hash(index)\n password += s[5]\n index = found_index + 1\n return password",
"def get_password():\n\n pwd = getpass(\"Enter your password below. It is used to protect your credentials.\\n\"\n \"The password must have a minimum length of 8 characters \"\n \"and can only contain alphanumeric characters and symbols.\\n\"\n \"Enter password (will be hidden): \")\n\n tries = 0 # Limit number of invalid attempts\n while True:\n if len(pwd) >= 8 and pwd.isascii() and pwd.isprintable() and ' ' not in pwd:\n if getpass(\"Confirm password: \") == pwd:\n return pwd\n else:\n print(\"Password mismatch!\")\n else:\n print(\"Invalid characters in password or too short!\")\n\n if tries == 3: return None\n pwd = getpass(\"\\nRe-enter password: \")\n tries += 1",
"def find_password( door_id ):\n\n\tpassword = [ '', '', '', '', '', '', '', '' ]\n\tincrementor = 0\n\t\n\tfor _i in range( 8 ):\n\t\tchar = ''\n\t\twhile not char:\n\t\t\t#_do_stupid_movie_password_animation( password, _i )\n\n\t\t\tinput = door_id + str( incrementor )\n\t\t\tm = hashlib.md5( )\n\t\t\tm.update( input.encode( 'utf-8' ) )\n\t\t\thash = m.hexdigest( )\n\n\t\t\tif hash.startswith( '00000' ):\n\t\t\t\tloc = hash[ 5 ]\n\t\t\t\tchar = hash[ 6 ]\n\t\t\t\tif loc.isdigit( ):\n\t\t\t\t\tloc = int( loc )\n\t\t\t\t\tif 0 <= loc <= ( len( password ) - 1 ) and not password[ loc ]:\n\t\t\t\t\t\tpassword[ loc ] = char\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tchar = ''\n\t\t\t\telse:\n\t\t\t\t\tchar = ''\n\t\t\t\n\t\t\tincrementor += 1\n\n\tpassword = ''.join( password )\n\treturn password",
"def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)",
"def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. Continuing...\") \n return passy",
"def password(self) -> str:",
"def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')",
"def find_valid_passwords(values: List[str]) -> int:\n search_reg = re.compile(\n r\"\\b(?P<first>[0-9]+)-(?P<second>[0-9]+)\\s(?P<letter>[a-z]):\\s(?P<password>[a-z]+)\")\n valid_password_count = 0\n\n for value in values:\n results = search_reg.search(value)\n target_char = results.group(\"letter\")\n password = results.group(\"password\")\n first_index = int(results.group(\"first\")) - 1\n second_index = int(results.group(\"second\")) - 1\n\n if (target_char == password[first_index]) != (target_char == password[second_index]):\n valid_password_count += 1\n\n return valid_password_count",
"def FirstPart(): \n return passwordChecker_incorrect(data)",
"def get_password(wordlen, digitlen, words, strength):\n\n while True:\n\n try:\n w = words.pop().capitalize()\n except IndexError:\n sys.exit(\"Unable to get a sufficiently strong password\")\n\n s = np.random.choice(SPECIAL_CHARS)\n i = np.random.randint(0, 10**digitlen)\n\n comp = [w, f\"{i:0{digitlen}d}\", s, s]\n np.random.shuffle(comp)\n pw = ''.join(comp)\n\n # pw = str(f\"{s}{w}{i:0{digitlen}d}{s}\")\n stats_pw = PasswordStats(pw)\n\n if stats_pw.strength() >= strength:\n return pw, stats_pw",
"def analyze_password(password):\n vowels = number_of_vowels(password)\n if valid_password(password) is True:\n result = password + \" is a valid password and contains \" + str(vowels) + \" vowels.\"\n else:\n result = password + \" is not a valid password and contains \" + str(vowels) + \" vowels.\"\n return result",
"def get_pass(self, item):\n text = str(self.get_contents(item), encoding=\"utf-8\")\n lines = text.split(\"\\n\")\n password = lines[0]\n return password",
"def num_pw_found(byte_string):\n hasher = hashlib.sha1()\n hasher.update(byte_string)\n digest = hasher.hexdigest().upper()\n pw_list = requests.get('https://api.pwnedpasswords.com/range/{}'.format(digest[:5]))\n for line in pw_list.text.split('\\n'):\n info = line.split(':')\n if info[0] == digest[5:]:\n return int(info[1])\n return 0",
"def testPassword(cryptPass, dictionaryFile):\n #salt = cryptPass[0:2]\n salt = crypt.mksalt(crypt.METHOD_SHA512) # Updated for SHA512 encrypted passwords\n dictFile = open(dictionaryFile, 'r')\n for word in dictFile.readlines():\n word = word.strip('\\n')\n cryptWord = crypt.crypt(word, salt)\n \n if cryptWord == cryptPass:\n print('[+] Found Password: ' + word + '\\n')\n return\n print('[-] Password Not Found.\\n')\n return",
"def matchpassword(username, password): # create function called matchpassword\n \n List = [] # Initialize list\n\n try:\n f = open(\"C:\\Portable Python 3.2.5.1\\password.txt\",\"r\") # opens password.txt\n List = f.readlines() # Reads password.txt into a list\n f.close() # Closes password.txt file\n except IOError:\n print(\"I/O error: Unable to read in File f\") # Exception if I/O Error\n\n for x in range(0,len(List),2): # Loop thru list to determine if match\n Listlower = List[x].lower()\n if((username.lower() + '\\n' == Listlower) and (password + '\\n' == List[x + 1])):\n return 'True'\n else:\n continue\n return 'False'",
"def password_generator(password_lenght):\r\n password = \"\"\r\n\r\n try:\r\n if password_lenght >=1:\r\n for i in range(password_lenght):\r\n choice = random.choice(symbols)\r\n password += str(choice)\r\n print(f\"Your password is: {password} \\nTnank you!\")\r\n return password\r\n else:\r\n return 0\r\n except Exception:\r\n pass",
"def extremely_stupid_naive_brute_force_crap():\n keystrokes = [l.strip() for l in open(\"keylog.txt\")]\n for i in range(1000, 10000000):\n if i % 10000 == 0:\n print i\n password = str(i)\n if all(is_subsequence(password, keys) for keys in keystrokes):\n print password\n break",
"def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)",
"def iterate_pword(current_password):\n\n num = _pword_to_num(current_password) # Turn password into list of ints\n for idx in reversed(range(len(num))):\n char_ord = num[idx]\n if char_ord != 122:\n char_ord += 1\n num[idx] = char_ord\n break\n else:\n char_ord = 97\n num[idx] = char_ord\n return _num_to_pword(num)",
"def getpassword(description = \"\"):\n\tif (description != \"\"): \n\t\tsys.stdout.write (\"%s\\n\" % description)\n\t\t\n\tpassword1 = getpass.getpass(\"Password: \");\n\tpassword2 = getpass.getpass(\"Password (confirm): \");\n\n\tif (password1 == password2):\n\t\treturn password1\n\telse:\n\t\tsys.stdout.write (colors.ORANGE + \"[Warning] Password did not match, please try again\" + colors.NO + \"\\n\")\n\t\treturn getpassword()",
"def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()",
"def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password",
"def _get_password_error_msg(password):\n # At least one letter and one digit\n if not any(c.isalpha() for c in password):\n return (\"The new password must contain at least one letter\", 'no_letter_in_password')\n if not any(c.isdigit() for c in password):\n return (\"The new password must contain at least one digit\", 'no_digit_in_password')\n return None"
] | [
"0.71826273",
"0.6203832",
"0.6179515",
"0.6156986",
"0.61567867",
"0.60574234",
"0.6030019",
"0.59959686",
"0.59700435",
"0.5946914",
"0.5921345",
"0.5909927",
"0.5908256",
"0.58809274",
"0.5878235",
"0.5816463",
"0.5804979",
"0.5787845",
"0.5780203",
"0.57457215",
"0.57276505",
"0.57061714",
"0.56991327",
"0.5696502",
"0.56824",
"0.56646657",
"0.56593484",
"0.554374",
"0.55170935",
"0.54889387"
] | 0.77611613 | 0 |
List of characters in database names | def makeDatabaseList():
charList = []
for ch in lower:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in numbers:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in special:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
for ch in other:
ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch)
if(caseSensitive):
for ch in upper:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch, url)
if(wildCards):
for ch in wildCards:
# ch = str(ch)
if(characterInDatabaseName(ch, url)):
charList.append(ch, url)
return charList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def makeDatabaseNamesList(n, ):",
"def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name",
"def _get_database_name(database):\n # make sure the return is only one data type\n filenames = []\n if database is not None:\n if not isinstance(database, list):\n database = [database]\n for db in database:\n filenames += glob.glob(db)\n\n return filenames",
"def _get_db_names(self, dbs, strict=True):\n dbs = utils.coerce_to_list(dbs)\n db_names = [utils.get_name(db) for db in dbs]\n if strict:\n good_dbs = self.instance.list_databases()\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\n bad_names = [db_name for db_name in db_names\n if db_name not in good_names]\n if bad_names:\n bad = \", \".join(bad_names)\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\n \"found: %s\" % bad)\n return db_names",
"def _get_char_names(self):\n return [device.get_char_name() for\n device in self.all_devices]",
"def donor_names():\n return list(donor_db.keys())",
"def getAllName(table):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table)\n\t\tnames = cur.fetchall()\n\t\tcon.commit()\n\t\tcon.close()\n\t\treturn names\n\texcept:\n\t\tprint('Could not run function getAllName from DbController')",
"def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"",
"def donor_names():\n return donor_db.keys()",
"def get_available_databases() -> List[str]:\r\n\tcur = psycopg2.connect(dbname='postgres').cursor()\r\n\tcur.execute(\"SELECT datname FROM pg_database WHERE datistemplate=FALSE;\")\r\n\treturn [row[0][:-6] for row in cur if row[0].endswith('wikidb')]",
"def dbdescs(data, dbname):\n # pylint: disable=bad-continuation\n return {\n 'admin': onedesc(data, dbname, 'admin', 'rw'),\n 'user': onedesc(data, dbname, 'user', 'rw'),\n 'viewer': onedesc(data, dbname, 'viewer', 'ro')\n }",
"def get_all_collection_names(self):\n select_list = [SQLBinaryExpr(SQLColumnExpr(SQLTableExpr(TABLE_NAME_COLL), COL_NAME_COLL_NAME),\n OP_AS, COL_NAME_COLL_NAME)]\n\n entries = self.select_generic_data(select_list=select_list, table_list=[TABLE_NAME_COLL])\n return [entrie[COL_NAME_COLL_NAME] for entrie in entries]",
"def donor_names():\n names = list()\n for name in donor_db:\n names = names + [name[0]]\n return names",
"def db_collations_choices(self):\n # To avoid pre-mature initialization of db-context.\n from django.db import connection\n\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT collname, collcollate FROM pg_collation\")\n rows = cursor.fetchall()\n return ((name, \"{} ({})\".format(name, collate)) for name, collate in rows)",
"def names(self) -> list[str]:",
"def get_char_names(charlist, caller):\n watch_list = caller.db.watching or []\n verbose_where = False\n if caller.tags.get(\"verbose_where\"):\n verbose_where = True\n return \", \".join(\n char_name(char, verbose_where, watch_list)\n for char in charlist\n if char.player\n and (not char.player.db.hide_from_watch or caller.check_permstring(\"builders\"))\n )",
"def do_list(self, line):\n\t\tx = [i for i in self.client.list_databases() if i['name'] not in ['admin','config','line','local','mongoengine_test','pymongo_test']]\n\t\tfor db in x:\n\t\t\tprint(db['name'])",
"def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames",
"def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())",
"def get_names_from_Seq_db(seq_db):\r\n names = []\r\n names_abudance_removed = []\r\n db = open(seq_db, \"r\")\r\n for seq_record in SeqIO.parse(db, \"fasta\"):\r\n if seq_record.id.endswith(\"_1\"):\r\n names.append(seq_record.id)\r\n names_abudance_removed.append((\"_\").join(\r\n seq_record.id.split(\"_\")[:-1]))\r\n else:\r\n names_abudance_removed.append(seq_record.id)\r\n names.append(seq_record.id + \"_1\")\r\n db.close()\r\n return names, names_abudance_removed",
"def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"",
"def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users",
"def get_db_object_name(name):\n\n # Default output list\n out_list = ['lemma', 'dbo', 'udb_plt']\n\n # Replace the elements of out_list if and only if there exists a\n # replacement for it\n parts = name.split('.')\n for (i, j) in enumerate(range(len(parts) - 1, -1, -1)):\n if parts[j]:\n out_list[(len(out_list) - 1) - i] = parts[j]\n return tuple(out_list)",
"def getDatabaseName(self):\n raise NotImplementedError",
"def scrub(self, table_name):\n\n return ''.join( chr for chr in table_name if chr.isalnum() )",
"def db_name(self):\n return self._db_name",
"def get_db_format(text):\n db_text = \"\"\n for t in text.split(\" \"):\n db_text += t.title()\n return db_text",
"def testCMSNametoList(self):\n result = self.mySiteDB.cmsNametoList(\"T1_US*\", \"SE\")\n self.assertItemsEqual(result, [u'cmsdcadisk01.fnal.gov'])",
"def queryList():\n #f = open(\"/var/log/scidbpy_log.txt\",\"w+\")\n #f.write(\"starting queryList\")\n\n header, rows = querySciDB(\"list('arrays')\")\n names = [row[1].translate(None, \"\\\"\") for row in rows]\n\n return names",
"def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list"
] | [
"0.6628727",
"0.5976508",
"0.5936507",
"0.5901102",
"0.58635503",
"0.5853042",
"0.5845213",
"0.58389676",
"0.58144695",
"0.58012706",
"0.5732453",
"0.5721888",
"0.5696421",
"0.5669424",
"0.56576335",
"0.5632975",
"0.56264454",
"0.56206435",
"0.5605173",
"0.5598442",
"0.5570138",
"0.5568779",
"0.5564397",
"0.55643475",
"0.5546304",
"0.55416715",
"0.5533799",
"0.5532949",
"0.55016935",
"0.5472257"
] | 0.7128734 | 0 |
returns list of characters that appear in any username | def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):
"""
sqlzoo characters
['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']
"""
lst = []
for ch in special:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in lower:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in numbers:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
for ch in other:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(caseSensitive):
for ch in upper:
if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
lst.append(ch)
if(wildCards):
for ch in wildcards:
lst.append(ch) #it'll match if there's users
return lst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]",
"def check_user_name(self, username):\n usernames = []\n for user in self.__users:\n if user['username'] == username:\n usernames.append(user)\n return usernames",
"def check_username(username):\n if username:\n if not re.match('[a-z]', username[0]):\n return ['username_error_badfirstchar']\n # Technically both these conditions might hold. However, the common\n # case seems to be that somebody starts typing their name beginning\n # with an upper-case letter, and it's probably sufficient to just\n # issue the first error in that case.\n elif not re.match('^[-a-z0-9_]+$', username):\n return ['username_error_badchar']\n return []",
"def checkUsernameSequences(n, ch, url, tableName, minLen = 1, maxLen = 2):\n if(minLen == 1):\n strLst = ch\n # assumes all of ch is a match\n else:\n strLst = []\n for k in range(minLen, maxLen + 1):\n lst = generateSubSequences(k, ch)\n sublst = [x for x in lst if userNameLike(x, url, tableName)]\n# list comprehensions with conditions:\n# https://stackoverflow.com/questions/6475314/python-for-in-loop-preceded-by-a-variable\n strLst += sublst\n return strLst",
"def containing(letter, text):\n return([word for word in text if word.count(letter) >= 1])",
"def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())",
"def find_single_letters(question):\n if re.findall(r\"\\bletter\\b|\\bletters\\b\", question):\n matches = re.findall(r\"\\b[A-Za-z]\\b\", question)\n\n return [m for m in matches]\n\n return []",
"def filt(seq, lst):\n regex = \"(\" + \")|(\".join(seq) + \")\"\n regex = re.compile(regex)\n slst = list(filter(regex.search, lst))\n return slst\n\n\n # still need a checkUsername function ",
"def chars(self, irc, msg, args, channel, username):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n user = self._sql(\"\"\"\n SELECT * FROM accounting_capsuler\n WHERE username=%s\"\"\", [username])\n if not user:\n irc.error('Could not find user \"{0}\"'.format(username))\n return\n\n chars = self._sql(\"\"\"\n SELECT * FROM character_charactersheet\n WHERE owner_id=%s\"\"\", [user['id']], single=False)\n\n if len(chars) == 0:\n irc.reply('User \"{0}\" has 0 characters registered'.format(user['username']),\n prefixNick=False)\n else:\n output = []\n for char in chars:\n output.append('{0} [{1}]'.format(\n char['name'],\n char['corporationName']\n ))\n irc.reply('Found {0} characters: {1}'.format(\n len(chars),\n \", \".join(output)\n ), prefixNick=False)",
"def get_blocked_usernames_list():\n return []",
"def makeList(username, url, caseSensitive = False, wildCards = True):\n charList = []\n for ch in lower:\n # check for ch in \n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in numbers:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in special:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in other:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(caseSensitive):\n for ch in upper:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(wildCards):\n for ch in wildcards:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n return charList",
"def get_pure_user_words(user_words: List[str], letters: List[str], words_from_dict: List[str]) -> List[str]:\r\n unknown_words = []\r\n for wordd in user_words:\r\n if wordd not in words_from_dict:\r\n unknown_words.append(wordd)\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in unknown_words:\r\n if len(word) >= 4 and len(word) <= 9:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list",
"def invalid_username(username):\n word_letters = re.sub('[^a-zA-Z-0-9]+', '', str(username))\n if any(item.isalpha() for item in word_letters):\n return False\n return True",
"def having_letters(self):\r\n self.letter=[chr(c) for c in range(97, 123)]\r\n self.owning_letters=list()\r\n i=0\r\n while i<7:\r\n temp=random.choice(self.letter)\r\n if temp not in self.owning_letters:\r\n self.owning_letters.append(temp)\r\n i+=1\r\n else:\r\n continue\r\n return self.owning_letters",
"def get_unique_characters(text):\n return sorted(list(set(text)))",
"def search_for_letters(phrase:str, letters:str='aeiou') -> set:\n return set(letters).intersection(set(phrase))",
"def _get_unique_chars(self, data_string):\n unique_chars = list(set(data_string))\n return unique_chars",
"def listusers():\n allusers = []\n with open('/etc/passwd', 'r') as pw:\n for l in pw.readlines():\n allusers.append(l.split(':')[0])\n users = [ d for d in os.listdir(\"/home\") if d in allusers ]\n return(users)",
"def remaining():\n return([letter for letter in alphabet if letter not in [char for char in list(decoded_dict.values()) if char.isupper() == True]])",
"def example_usernames():\n return [\"A\", \"B\", \"C\"]",
"def other_chars(self):\n return [sign for sign in re.findall(r'[^\\w\\s]', self.text)]",
"def remaining_en():\n return([letter for letter in alphabet if decoded_dict[letter].upper() == letter])",
"def other_chars(self):\n return re.findall(r'[,.!?_\\':;/#%*\\=@\"]', self.text)",
"def _get_consonants(sequence: str) -> list:\n consonants = []\n for char in sequence:\n if char in CONSONANTS:\n consonants.append(char)\n return consonants",
"def username_validation(username):\n errors = []\n #Check if Username exists\n if(username_present(username)):\n errors.append(\"Användarnamnet finns redan.\")\n #Username needs to be longer then 3 chars\n if(len(username) <= 3):\n errors.append(\"Användarnamnet mäste vara 3 tecken eller längre.\")\n\n return errors",
"def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result",
"def normalize_username(username):\n\n regex = compile(UnicodeUsernameValidator.regex)\n normalized_username = \"\"\n for char in username:\n if not regex.match(char):\n continue\n normalized_username += char\n return normalized_username",
"def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)",
"def userNames(lst, url, tableName):\n n = len(lst)\n # https://docs.python.org/3/library/itertools.html#itertools.product\n # https://stackoverflow.com/questions/3034014/how-to-apply-itertools-product-to-elements-of-a-list-of-lists\n lst2 = list(itertools.product(*lst))\n lst3 = list(map(\"\".join, lst2))\n #\n # Maybe use checkUsernameSequences here,\n # then add a check to reduce the amount of possibilities before building lst?\n #\n\n seq = checkUsernameSequences(n, lst, url, tableName, minLen = 2, maxLen = 2)\n # does not include the single characters since minLen > 1\n\n lst4 = filt(seq, lst3)\n \"\"\"# next time:\n find matching strings. That should (hopefully) reduce the space to search. \n REMEMBER, this filtering will miss all single character usernames!!!\n\n https://docs.python.org/3/library/re.html#regular-expression-syntax\n https://stackoverflow.com/questions/3640359/regular-expressions-search-in-list\n https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string\n https://stackoverflow.com/questions/19300020/python-match-a-string-with-regex\n https://stackoverflow.com/questions/37974047/if-any-strings-in-a-list-match-regex\n\"\"\"\n\n lst5 = [x for x in lst4 if checkUsername(x, url, tableName)]\n # lst = list(map(checkUsername, lst2))\n return lst5",
"def get_usernames(self, selector: Optional[Callable[[User], bool]]=None) -> Set[str]:\n return set([u.name for u in self.iter_users(selector)])"
] | [
"0.65302527",
"0.6494948",
"0.63683933",
"0.6244168",
"0.6195335",
"0.61639106",
"0.61376554",
"0.6136397",
"0.6090331",
"0.6048526",
"0.6043042",
"0.6035452",
"0.60292345",
"0.6010415",
"0.59815353",
"0.5924206",
"0.5911422",
"0.58786315",
"0.5807217",
"0.57925546",
"0.578954",
"0.5780176",
"0.5751811",
"0.573854",
"0.5725921",
"0.5721554",
"0.5720262",
"0.5706431",
"0.56956553",
"0.5682372"
] | 0.68477184 | 0 |
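The `filt` helper in the negatives above builds one alternation regex out of the known character sequences and uses it to prune a candidate list. The sketch below reproduces that filtering pattern in a self-contained form; the sequences and alphabet are invented for illustration, and `re.escape` is added so regex-special characters cannot break the pattern.

```python
import itertools
import re

def filter_by_subsequences(subsequences, candidates):
    """Keep only candidates containing at least one known subsequence."""
    # re.escape guards against characters that are special inside a regex.
    pattern = re.compile("|".join(re.escape(s) for s in subsequences))
    return [c for c in candidates if pattern.search(c)]

# Invented data: two-character sequences assumed to occur in valid usernames.
known_pairs = ["ad", "dm", "mi"]
candidates = ["".join(p) for p in itertools.product("adimn", repeat=3)]
print(filter_by_subsequences(known_pairs, candidates)[:5])
```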
generates all subsequences of ch with length k | def generateSubSequences(k, ch):
seq = ["".join(c) for c in itertools.product(ch, repeat = k)]
# discussion about the best way to do this:
# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings
return seq | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]",
"def cut_kmer(sequence, k_mer):\n for i in range(0, len(sequence)-k_mer + 1):\n yield sequence[i:i+k_mer]",
"def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer",
"def __permutation(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __permutation(orgset[:i] + orgset[i + 1 :], k - 1):\n yield (x,) + s",
"def pattern_list(k):\r\n p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n return p_list",
"def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list",
"def kmer_set(s, k):\n kmer = set([])\n n = len(s)\n #n-k+1 is the available range of values or probablities.\n for x in range(0, n - k + 1):\n kmer.add(s[x:x + k])\n return kmer",
"def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res",
"def seq_reconstruct(k, d, kmers):\n seq = []\n for p in kmers:\n seq.append(p[0])\n for i in reversed(range(1, 2 * k + d - 1)):\n print(i)\n seq.append(kmers[-i][-1])\n return \"\".join(seq)",
"def shingle(s, k):\n k = min(len(s), k)\n for i in range(len(s) - k + 1):\n yield s[i:i+k]",
"def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n yield sequence[i:i+kmer_size]",
"def get_all_pandigitals(i, k):\n l = range(i, k + 1)\n return itertools.permutations(l)",
"def get_paths_of_length_k(subpaths, k):\r\n subpaths_of_length_k = [i for i in subpaths if len(\r\n i) == k] # all k-length subpaths\r\n subpaths = [i for i in subpaths if len(i) != k] # remove k-length subpaths\r\n return subpaths_of_length_k, subpaths",
"def get_subsequence(a, k):\n if k > len(a):\n return None\n dp = [0] * len(a)\n aux = [inf] * (k + 1)\n aux[0] = -inf\n high = 0\n for i in range(len(a)):\n dp[i] = bisect_left(aux, a[i])\n aux[dp[i]] = min(aux[dp[i]], a[i])\n high = max(high, dp[i])\n if high == k:\n return aux[1:]\n return None",
"def iter_strings_k(n, k, m):\n # initial state -- all zeros\n state = np.zeros((n,), dtype=int)\n\n if k == 0:\n # that was it (!)\n return\n\n while True:\n #print(f\"next state is {state=}\")\n yield state\n\n # Update to next state. Idea is to count and carry as usual, except if\n # there are already k nonzeros in which case we count and carry by\n # ignoring all the trailing zeros. This is the algorithm described here\n # - https://stackoverflow.com/a/10458380/1694896 - adapted from bits to\n # base-m \"mits\"\n if np.count_nonzero(state) < k:\n _add_and_carry_in_place(state, m)\n continue\n\n # there are k nonzeros already, find first nonzero from least\n # significant end. See https://stackoverflow.com/a/52911347/1694896\n last_nonzero = np.max(np.nonzero(state))\n # and increment that one\n _add_and_carry_in_place(state, m, last_nonzero)\n if not np.any(state):\n # end of iteration reached, as we've gone back to the all-zero\n # state.\n return",
"def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs",
"def TAoCPpermutation(n,k):\n perms = []\n for subset in itertools.combinations(range(n), k):\n A = []; B = []; C = []; min = 0; j = 0; up = 0\n for i in xrange(n):\n if(j>=k or i != subset[j]):\n B.append(i)\n up +=1\n else:\n up -=1\n j += 1\n if(up < min):\n min = up\n B.append(i)\n else:\n A.append(i)\n C.append(B.pop())\n perms.append(A+B+C)\n return perms",
"def kmer(text, i, k):\r\n return text[i:(i+k)]",
"def kmer(text, i, k):\r\n return text[i:(i+k)]",
"def Combinations(n, k):\n if int(n) != n or int(k) != k or n < k or k <= 0:\n return None\n\n if k == n:\n return [range(n)]\n elif k == 1:\n return [[ii] for ii in range(n)]\n\n combinations = Combinations(n-1, k)\n combinations_append_last = Combinations(n-1, k-1)\n for ii in range(len(combinations_append_last)):\n combination = combinations_append_last[ii]\n combination.append(n-1)\n combinations.append(combination)\n return combinations",
"def randomKmers(dna, k):\n kmers = []\n for seq in dna:\n n = len(seq)\n i = random.randint(0, n-k)\n kmer = seq[i:i+k]\n kmers.append( kmer)\n return kmers",
"def generate_kmers(k):\n\n kmers_list = []\n kmers_tuples = itertools.product('ACGT', repeat=k)\n for kmer in kmers_tuples:\n kmers_list.append(''.join(kmer))\n\n return kmers_list",
"def de_bruijn(k, n):\n alphabet = k\n k = len(k)\n\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n\n db(1, 1)\n sequence.extend(sequence[:n - 1])\n\n return \"\".join(alphabet[i] for i in sequence)",
"def subsets(x, k):\n sub_set = set()\n for i in x:\n sub_set = sub_set.union(set(combinations(i, k)))\n return list(sub_set)",
"def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p",
"def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1",
"def weak_compositions(n, k):\n if n < 0 or k < 0:\n return\n elif k == 0:\n # the empty sum, by convention, is zero, so only return something if\n # n is zero\n if n == 0:\n yield []\n return\n elif k == 1:\n yield [n]\n return\n else:\n # For each first integer i in range(n+1), list all compositions\n # on n-i nodes, of length at most k-1.\n for i in range(n+1):\n for comp in weak_compositions(n-i, k-1):\n yield [i] + comp",
"def build(s, k, T):\n L = len(T)\n f = int(''.join(T) + str(s))\n sf = k * f\n tail = str(sf)[-(L+1):]\n # print('Multiplying %s by %s gives %s (...%s)' % (f, k, sf, tail))\n assert len(tail) == L + 1\n T = list(tail)\n return T",
"def enumerate_kmers(string, k, start=0):\n for i in range(0, len(string) - k + 1):\n yield start + i, string[i:i+k]",
"def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)"
] | [
"0.6393046",
"0.63518596",
"0.6346528",
"0.63429147",
"0.62951434",
"0.62502956",
"0.62439233",
"0.61887056",
"0.6169127",
"0.6153225",
"0.61380696",
"0.6062542",
"0.6060054",
"0.6024283",
"0.6011528",
"0.6003349",
"0.59933025",
"0.5982021",
"0.5982021",
"0.59802747",
"0.59543186",
"0.5937869",
"0.5930514",
"0.59005123",
"0.5885605",
"0.58799535",
"0.58694834",
"0.5865896",
"0.58610773",
"0.58521634"
] | 0.85543895 | 0 |
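The positive document for this query enumerates every length-`k` string over the alphabet `ch` with `itertools.product`; despite the docstring, these are fixed-length strings drawn with repetition rather than subsequences of an existing sequence (for true subsequences, `itertools.combinations` would be the tool). A minimal runnable sketch of the same idea:

```python
import itertools

def generate_sub_sequences(k, ch):
    # Every k-tuple over the alphabet, joined back into a k-character string.
    return ["".join(c) for c in itertools.product(ch, repeat=k)]

print(generate_sub_sequences(2, "abc"))
# ['aa', 'ab', 'ac', 'ba', 'bb', 'bc', 'ca', 'cb', 'cc']
```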
Adds a user's mysql tables back into the OCF database. | def _add_mysql(user, options, dump = None):
# Access the new username with user["username"]
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert_db():\n populate_tables()",
"def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True",
"def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()",
"def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()",
"def populate_db():\n try:\n users = [\n User(name=u'admin', role=1),\n ]\n db.session.add_all(users)\n db.session.commit()\n except:\n db.session.rollback()\n raise Exception(\"Failed to populate the database\")\n finally:\n db.session.close()",
"def append_table(self, table):\n\n self._db_manager.register_table(table)",
"def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)",
"def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()",
"def add_user(uid):\n if \"drop tables\" in uid:\n raise DropTablesError(\"Drop Tables command detected in input commands - Print Error Message\")\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}{}'.format(DB_DIRECTORY, DB_NAME))\n user_table_name = uid #This might be changed later\n cursor = db.cursor()\n cursor.execute(\"INSERT INTO user_ids VALUES (NULL, ?,?)\",(uid, user_table_name))\n variable_table_command = '''CREATE TABLE {} (row_id INTEGER PRIMARY KEY AUTOINCREMENT, song_notes TEXT, author_name TEXT, creation_date TEXT, project_name TEXT)'''.format(user_table_name)\n cursor.execute(variable_table_command)\n db.commit()\n cursor.close()\n db.close()",
"def unlockTables(self):\n if self.dbType=='mysql':\n query = \"UNLOCK TABLES\" \n\t self.updateDBAndLog(query)\n\telif self.dbType=='sqlite':\n\t self.db.commit()",
"def create_tables():\n db.create_all()",
"def create_tables():\n db.create_all()",
"def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()",
"def upgrade():\r\n current_context = op.get_context()\r\n meta = current_context.opts['target_metadata']\r\n user = sa.Table('users', meta, autoload=True)\r\n\r\n # Add the initial admin user account.\r\n op.bulk_insert(user, [{\r\n 'username': u'admin',\r\n 'password': u'$2a$10$LoSEVbN6833RtwbGQlMhJOROgkjHNH4gjmzkLrIxOX1xLXNvaKFyW',\r\n 'email': u'[email protected]',\r\n 'activated': True,\r\n 'is_admin': True,\r\n 'api_key': u'123456',\r\n }\r\n ])",
"def create_db_tables():\n\n try:\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()\n except Exception as e:\n # TODO: melhorar o informe do erro\n raise e",
"def create_tables():\n db.create_all()",
"def create_users_tables(cls):\n cursor = Database.connect_to_db()\n sql_command = \"\"\"CREATE TABLE IF NOT EXISTS \"public\".\"users\" (\n id SERIAL ,\n firstname VARCHAR(255) NOT NULL,\n lastname VARCHAR(255) NOT NULL,\n othername VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n phonenumber VARCHAR(255) NOT NULL,\n passporturl TEXT NOT NULL,\n roles VARCHAR(255) NOT NULL,\n nationalid VARCHAR(255) NOT NULL,\n county VARCHAR(255) NOT NULL,\n password VARCHAR(255) NOT NULL,\n date_created VARCHAR(80),\n date_modified VARCHAR(80),\n PRIMARY KEY (id)\n )\n \"\"\"\n cursor.execute(sql_command)",
"def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")",
"def populate_table(\n user, created_at, tweet, retweet_count, id_str, my_database=DATABASE):\n\n dbconnect = connect_db(DATABASE)\n\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # add content here\n\n try:\n query=\"INSERT INTO tweets (user, created_at, tweet, retweet_count, id_str) VALUES (%s, %s, %s, %s, %s)\"\n \n cursor.execute(query, (user, created_at, tweet, retweet_count, id_str))\n \n dbconnect.commit()\n print(\"commited\")\n\n except mysql.Error as e:\n print(e)\n dbconnect.rollback()\n\n cursor.close()\n dbconnect.close()\n\n return",
"def init():\n database.create_tables([Tracker])\n database.commit()",
"def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)",
"def init_db():\n # users table\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS users (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"name VARCHAR(255) NOT NULL,\"\n \"email VARCHAR(255) NOT NULL,\"\n \"password VARCHAR(30) NOT NULL,\"\n \"birthdate DATE);\"\n )\n\n # users' phone records table\n cur.execute(\"CREATE TABLE IF NOT EXISTS records (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"ownerID INTEGER,\"\n \"name VARCHAR(255),\"\n \"phone VARCHAR(22),\"\n \"birthdate DATE);\")",
"def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)",
"def drop_user(self):\n try:\n with self.connect_db:\n request = \"\"\"DROP TABLE IF EXISTS user\"\"\"\n self.connect_db.execute(request)\n self.connect_db.commit()\n\n except Exception:\n super_logger.error('Error drop_user', exc_info=True)",
"def clear_user_table(self,connection):\n sql=\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql)",
"def migrateTables(self):\n tables = self.client_from.tables.list(['columns'])\n if len(tables) > 0:\n for table in tables:\n self.client_to.tables.update(table['tableId'], json.dumps(table))\n else:\n print(\"No tables to migrate!\")\n return\n print(len(tables) + \" Tables migrated!\")",
"def createTables():\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table, query in tables.items():\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to create tables:\" )\n print(ex)\n sys.exit(1)",
"def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()",
"def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)",
"def insert_default_users():\n user1 = User(email=current_app.config['ADMIN_EMAIL'],\n password=current_app.config['ADMIN_PW'],\n first_name=current_app.config['ADMIN_FIRST_NAME'],\n last_name=current_app.config['ADMIN_LAST_NAME'],\n confirmed=True)\n user1.role = Role.query.filter_by(name='Administrator').first()\n db.session.add(user1)\n\n user2 = User(email=current_app.config['USERMANAGER_EMAIL'],\n password=current_app.config['USERMANAGER_PW'],\n first_name=current_app.config['USERMANAGER_FIRST_NAME'],\n last_name=current_app.config['USERMANAGER_LAST_NAME'],\n confirmed=True)\n user2.role = Role.query.filter_by(name='Usermanager').first()\n db.session.add(user2)\n\n user3 = User(email=current_app.config['USER_EMAIL'],\n password=current_app.config['USER_PW'],\n first_name=current_app.config['USER_FIRST_NAME'],\n last_name=current_app.config['USER_LAST_NAME'],\n confirmed=True)\n user3.role = Role.query.filter_by(name='User').first()\n db.session.add(user3)\n\n db.session.commit()"
] | [
"0.6519952",
"0.62828916",
"0.61585516",
"0.6156607",
"0.61473525",
"0.6075949",
"0.5991981",
"0.5991254",
"0.5913686",
"0.5912704",
"0.5908939",
"0.5908939",
"0.5899569",
"0.5880359",
"0.5877914",
"0.5871357",
"0.5836821",
"0.58103013",
"0.57808405",
"0.5778543",
"0.57273936",
"0.5695358",
"0.569088",
"0.56801015",
"0.56705755",
"0.5667713",
"0.5655319",
"0.5647554",
"0.56450355",
"0.56349105"
] | 0.6568307 | 0 |
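The positive document for this query is an unimplemented stub, so the following is only a hypothetical sketch of what restoring a user's MySQL tables from a dump might look like. It assumes the standard `mysql` command-line client is installed, that `dump` is a path to a plain SQL file, and that the database shares the user's name; none of these details come from the actual OCF code.

```python
import subprocess

def add_mysql(user, options, dump=None):
    """Hypothetical sketch: replay a user's SQL dump into their database."""
    if dump is None:
        return  # Nothing to restore.
    db_name = user["username"]  # Assumes the database is named after the user.
    with open(dump, "rb") as dump_file:
        # Feed the dump to the mysql CLI; credentials would normally come from
        # `options` or a defaults file rather than being hard-coded here.
        subprocess.run(["mysql", db_name], stdin=dump_file, check=True)
```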
Class for handling all minidump symbolizing code on Android. | def __init__(self, dump_finder, build_dir, symbols_dir=None):
# Map from minidump path (string) to minidump_dump output (string).
self._minidump_dump_output = {}
# Map from minidump path (string) to the directory that should be used when
# looking for symbol binaries (string).
self._minidump_symbol_binaries_directories = {}
# We use the OS/arch of the host, not the device.
super(AndroidMinidumpSymbolizer, self).__init__(
platform.system().lower(), platform.machine(), dump_finder, build_dir,
symbols_dir=symbols_dir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def obfuscate():\r\n smali_file_list = u.load_smali_file() # Load smali files\r\n set()\r\n change_all_direct_method(\r\n set(\r\n find_all_direct_method(\r\n list(u.get_android_method_names()) + list(set(find_all_native_method(smali_file_list))),\r\n smali_file_list\r\n )\r\n ),\r\n smali_file_list,\r\n set(\r\n find_all_landroid_ljava_over(\r\n smali_file_list\r\n )\r\n )\r\n )",
"def android_patch() -> None:\n fname = 'src/cpython/Modules/Setup.dist'\n txt = efrotools.readfile(fname)\n\n # Need to switch some flags on this one.\n txt = efrotools.replace_one(txt, '#zlib zlibmodule.c',\n 'zlib zlibmodule.c -lz\\n#zlib zlibmodule.c')\n # Just turn all these on.\n for enable in [\n '#array arraymodule.c', '#cmath cmathmodule.c _math.c',\n '#math mathmodule.c', '#_contextvars _contextvarsmodule.c',\n '#_struct _struct.c', '#_weakref _weakref.c',\n '#_testcapi _testcapimodule.c', '#_random _randommodule.c',\n '#_elementtree -I', '#_pickle _pickle.c',\n '#_datetime _datetimemodule.c', '#_bisect _bisectmodule.c',\n '#_heapq _heapqmodule.c', '#_asyncio _asynciomodule.c',\n '#unicodedata unicodedata.c', '#fcntl fcntlmodule.c',\n '#select selectmodule.c', '#_csv _csv.c',\n '#_socket socketmodule.c', '#_blake2 _blake2/blake2module.c',\n '#binascii binascii.c', '#_posixsubprocess _posixsubprocess.c',\n '#_sha3 _sha3/sha3module.c'\n ]:\n txt = efrotools.replace_one(txt, enable, enable[1:])\n if ENABLE_OPENSSL:\n txt = efrotools.replace_one(txt, '#_ssl _ssl.c \\\\',\n '_ssl _ssl.c -DUSE_SSL -lssl -lcrypto')\n else:\n # Note that the _md5 and _sha modules are normally only built if the\n # system does not have the OpenSSL libs containing an optimized\n # version.\n for enable in [\n '#_md5 md5module.c', '#_sha1 sha1module.c',\n '#_sha256 sha256module.c', '#_sha512 sha512module.c'\n ]:\n txt = efrotools.replace_one(txt, enable, enable[1:])\n\n # Turn this off (its just an example module).\n txt = efrotools.replace_one(txt, 'xxsubtype xxsubtype.c',\n '#xxsubtype xxsubtype.c')\n\n # For whatever reason this stuff isn't in there at all; add it.\n txt += '\\n_json _json.c\\n'\n\n txt += '\\n_lzma _lzmamodule.c -llzma\\n'\n\n txt += ('\\n_sqlite3 -I$(srcdir)/Modules/_sqlite'\n ' -DMODULE_NAME=\\'\\\\\"sqlite3\\\\\"\\' -DSQLITE_OMIT_LOAD_EXTENSION'\n ' -lsqlite3 \\\\\\n'\n ' _sqlite/cache.c \\\\\\n'\n ' _sqlite/connection.c \\\\\\n'\n ' _sqlite/cursor.c \\\\\\n'\n ' _sqlite/microprotocols.c \\\\\\n'\n ' _sqlite/module.c \\\\\\n'\n ' _sqlite/prepare_protocol.c \\\\\\n'\n ' _sqlite/row.c \\\\\\n'\n ' _sqlite/statement.c \\\\\\n'\n ' _sqlite/util.c\\n')\n\n if ENABLE_OPENSSL:\n txt += '\\n\\n_hashlib _hashopenssl.c -DUSE_SSL -lssl -lcrypto\\n'\n\n txt += '\\n\\n*disabled*\\n_ctypes _crypt grp'\n\n efrotools.writefile(fname, txt)\n\n # Ok, this is weird.\n # When applying the module Setup, python looks for any line containing *=*\n # and interprets the whole thing a a global define?...\n # This breaks things for our static sqlite compile above.\n # The check used to look for [A-Z]*=* which didn't break, so let' just\n # change it back to that for now.\n fname = 'src/cpython/Modules/makesetup'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt, '\t\t*=*)\tDEFS=\"$line$NL$DEFS\"; continue;;',\n '\t\t[A-Z]*=*)\tDEFS=\"$line$NL$DEFS\"; continue;;')\n efrotools.writefile(fname, txt)\n\n print('APPLIED EFROTOOLS ANDROID BUILD PATCHES.')",
"def _binaries_to_symbolize(self):\n raise NotImplementedError()",
"def obfuscate():\n smali_file_list = u.load_smali_file() # Load smali files\n change_all_field(set(find_all_field(smali_file_list)), smali_file_list, set(find_all_landroid_ljava_over(smali_file_list)))",
"def test_llvm_strip(self):\n self.assertEqual(\n self.ndk.llvm_strip,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-strip\",\n )",
"def get_apk(self):",
"def test_llvm_objdump(self):\n self.assertEqual(\n self.ndk.llvm_objdump,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-objdump\",\n )",
"def _boilerplate_to_python(indent):\n indent_str = \" \" * indent\n boilerplate = indent_str + \"import core.vba_library\\n\"\n boilerplate = indent_str + \"import core.vba_context\\n\"\n boilerplate += indent_str + \"from core.utils import safe_print\\n\"\n boilerplate += indent_str + \"from core.utils import safe_str_convert\\n\"\n boilerplate += indent_str + \"from core.utils import plus\\n\"\n boilerplate += indent_str + \"from core.utils import eq\\n\"\n boilerplate += indent_str + \"from core.utils import neq\\n\"\n boilerplate += indent_str + \"from core.utils import lt\\n\"\n boilerplate += indent_str + \"from core.utils import lte\\n\"\n boilerplate += indent_str + \"from core.utils import gt\\n\"\n boilerplate += indent_str + \"from core.utils import gte\\n\"\n boilerplate += indent_str + \"import core.utils\\n\"\n boilerplate += indent_str + \"from core.python_jit import update_array\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_num\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_str\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int_list\\n\\n\"\n boilerplate += indent_str + \"try:\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context\\n\"\n boilerplate += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context = context\\n\"\n return boilerplate",
"def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def __init__(self, static_lib):\n # TODO: Support dump for reading symbols from static libraries\n assert not static_lib and \"static libs not yet supported with dump\"\n self.tool = self.find_tool()\n if self.tool is None:\n print(\"ERROR: Could not find dump\")\n sys.exit(1)\n self.flags = ['-n', '-v']\n object_mode = environ.get('OBJECT_MODE')\n if object_mode == '32':\n self.flags += ['-X32']\n elif object_mode == '64':\n self.flags += ['-X64']\n else:\n self.flags += ['-X32_64']",
"def test_py2hex_minify_arg():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.py2hex(argv=['tests/example.py', '-m'])\n mock_flash.assert_called_once_with(path_to_python='tests/example.py',\n path_to_runtime=None,\n paths_to_microbits=['tests'],\n minify=True,\n keepname=True)",
"def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir",
"def applyDemapping(self):\n pass",
"def _clean_amm_swaps(cursor: 'DBCursor') -> None:\n log.debug('Enter _clean_amm_swaps')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"sushiswap_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_trades%\";')\n cursor.execute('DROP VIEW IF EXISTS combined_trades_view;')\n cursor.execute('DROP TABLE IF EXISTS amm_swaps;')\n log.debug('Exit _clean_amm_swaps')",
"def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def retore_hal_ptrs(HalDispatchTable,HaliQuerySystemInformation,HalpSetSystemInformation):\n\tif HaliQuerySystemInformation == 0x0 or HalpSetSystemInformation == 0x0:\n\t\treturn \"\"\n\telse:\n\t\tshellcode = (\n\t\t\"\\x31\\xc0\"\n\t\t\"\\xb8\" + struct.pack(\"L\", HalpSetSystemInformation) +\n\t\t\"\\xa3\" + struct.pack(\"L\", HalDispatchTable + 0x8) +\n\t\t\"\\xb8\" + struct.pack(\"L\", HaliQuerySystemInformation) +\n\t\t\"\\xa3\" + struct.pack(\"L\", HalDispatchTable + 0x4)\n\t\t)\n\t\n\t\treturn shellcode",
"def test_llvm_readelf(self):\n self.assertEqual(\n self.ndk.llvm_readelf,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-readelf\",\n )",
"def c_code_cache_version(self):\r\n return ()",
"def c_code_cache_version(self):\r\n return ()",
"def swint(self) -> None:",
"def main():\n\n dest_dir = sys.argv[1:][0] or '.'\n print 'Writing logs to: ' + dest_dir\n\n device_id = sys.argv[1:][1] or null\n print 'Using device_id: ' + device_id\n\n # Organize test output by device in case multiple devices are being tested.\n dest_dir = os.path.join(dest_dir, \"perftesting\", device_id)\n\n android_home = os.environ['ANDROID_HOME']\n print 'Your ANDROID_HOME is set to: ' + android_home\n # Uncomment this next line to hardcode your android_home if you can't set\n # it in your environment.\n # android_home = '/full/path/to/android/sdk'\n\n platform_tools = os.path.join(android_home, 'platform-tools')\n current_path = os.environ.get('PATH', '')\n os.environ['PATH'] = (platform_tools if current_path == '' else current_path +\n os.pathsep + platform_tools)\n\n if not os.path.isdir(android_home):\n print 'Your ANDROID_HOME path do not appear to be set in your environment'\n sys.exit(1)\n\n # Your SDK path. Adjust this to your needs.\n sdk_path = android_home\n\n # sets a variable with the package's internal name\n package_name = 'es.developer.achambi.pkmng'\n\n clean_test_files(dest_dir)\n\n # Connects to the current device, returning a MonkeyDevice object\n print 'Waiting for a device to be connected.'\n device = MonkeyRunner.waitForConnection(5, device_id)\n print 'Device connected.'\n\n # Protip1: Remove the screen lock on your test devices then uncomment\n # this like and the same one farther down. This will prevent you from\n # worrying about whether your device display has gone to sleep.\n # Alternatively, you can use the \"never sleep when charging\" developer\n # ption.\n # device.press(\"KEYCODE_POWER\", \"DOWN_AND_UP\")\n\n enable_dump_permission(sdk_path, device_id, dest_dir, package_name)\n enable_storage_permission(sdk_path, device_id, dest_dir, package_name)\n\n open_app(device, package_name)\n\n # Clear the dumpsys data for the next run must be done immediately\n # after open_app().\n reset_graphics_dumpsys(device, package_name)\n\n run_tests_and_systrace(sdk_path, device, device_id, dest_dir,\n package_name)\n\n # Device files could be in either location on various devices.\n pull_device_data_files(sdk_path, device_id,\n '/storage/emulated/0/Android/data/',\n dest_dir, package_name, '1')\n pull_device_data_files(sdk_path, device_id,\n '/storage/emulated/legacy/Android/data/',\n dest_dir, package_name, '2')\n\n # Protip1: See comment above.\n # device.press(\"KEYCODE_POWER\", \"DOWN_AND_UP\")\n\n analyze_data_files(dest_dir)",
"def testPullMinidumps(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n time_offset = platform_backend.GetDeviceHostClockOffset()\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n # Android's implementation of \"touch\" doesn't support setting time via\n # Unix timestamps, only via dates, which are affected by timezones. So,\n # figure out what the device's timestamp for January 2nd, 1970 is and use\n # that to calculate the expected local timestamp. January 2nd is used\n # instead of January 1st so that we can't get accidentally get a negative\n # timestamp if the host-device clock offset is negative.\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n device_mtime = self._browser_backend.device.RunShellCommand(\n ['stat', '-c', '%Y', remote_dump_file], single_line=True)\n device_mtime = int(device_mtime.strip())\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n self.assertEqual(os.path.getmtime(local_path), device_mtime - time_offset)",
"def sanitize(info):\n if \"processor\" in info and info[\"processor\"] == \"universal-x86-x86_64\":\n # If we're running on OS X 10.6 or newer, assume 64-bit\n if release[:4] >= \"10.6\": # Note this is a string comparison\n info[\"processor\"] = \"x86_64\"\n info[\"bits\"] = 64\n else:\n info[\"processor\"] = \"x86\"\n info[\"bits\"] = 32",
"def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .android_required_password_type import AndroidRequiredPasswordType\n from .device_compliance_policy import DeviceCompliancePolicy\n from .device_threat_protection_level import DeviceThreatProtectionLevel\n\n from .android_required_password_type import AndroidRequiredPasswordType\n from .device_compliance_policy import DeviceCompliancePolicy\n from .device_threat_protection_level import DeviceThreatProtectionLevel\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"deviceThreatProtectionEnabled\": lambda n : setattr(self, 'device_threat_protection_enabled', n.get_bool_value()),\n \"deviceThreatProtectionRequiredSecurityLevel\": lambda n : setattr(self, 'device_threat_protection_required_security_level', n.get_enum_value(DeviceThreatProtectionLevel)),\n \"minAndroidSecurityPatchLevel\": lambda n : setattr(self, 'min_android_security_patch_level', n.get_str_value()),\n \"osMaximumVersion\": lambda n : setattr(self, 'os_maximum_version', n.get_str_value()),\n \"osMinimumVersion\": lambda n : setattr(self, 'os_minimum_version', n.get_str_value()),\n \"passwordExpirationDays\": lambda n : setattr(self, 'password_expiration_days', n.get_int_value()),\n \"passwordMinimumLength\": lambda n : setattr(self, 'password_minimum_length', n.get_int_value()),\n \"passwordMinutesOfInactivityBeforeLock\": lambda n : setattr(self, 'password_minutes_of_inactivity_before_lock', n.get_int_value()),\n \"passwordPreviousPasswordBlockCount\": lambda n : setattr(self, 'password_previous_password_block_count', n.get_int_value()),\n \"passwordRequired\": lambda n : setattr(self, 'password_required', n.get_bool_value()),\n \"passwordRequiredType\": lambda n : setattr(self, 'password_required_type', n.get_enum_value(AndroidRequiredPasswordType)),\n \"securityBlockJailbrokenDevices\": lambda n : setattr(self, 'security_block_jailbroken_devices', n.get_bool_value()),\n \"securityDisableUsbDebugging\": lambda n : setattr(self, 'security_disable_usb_debugging', n.get_bool_value()),\n \"securityPreventInstallAppsFromUnknownSources\": lambda n : setattr(self, 'security_prevent_install_apps_from_unknown_sources', n.get_bool_value()),\n \"securityRequireCompanyPortalAppIntegrity\": lambda n : setattr(self, 'security_require_company_portal_app_integrity', n.get_bool_value()),\n \"securityRequireGooglePlayServices\": lambda n : setattr(self, 'security_require_google_play_services', n.get_bool_value()),\n \"securityRequireSafetyNetAttestationBasicIntegrity\": lambda n : setattr(self, 'security_require_safety_net_attestation_basic_integrity', n.get_bool_value()),\n \"securityRequireSafetyNetAttestationCertifiedDevice\": lambda n : setattr(self, 'security_require_safety_net_attestation_certified_device', n.get_bool_value()),\n \"securityRequireUpToDateSecurityProviders\": lambda n : setattr(self, 'security_require_up_to_date_security_providers', n.get_bool_value()),\n \"securityRequireVerifyApps\": lambda n : setattr(self, 'security_require_verify_apps', n.get_bool_value()),\n \"storageRequireEncryption\": lambda n : setattr(self, 'storage_require_encryption', n.get_bool_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields",
"def _debugmallocstats(): # real signature unknown; restored from __doc__\n pass",
"def _magic_bgmt(self, s):\n s = s.strip()\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s, rounded_32bit = False)\n f = idapy._d.Fun(\"gui_massive_event_loop\")\n r = find_refs(idapy._d, a, f.addr)\n \n for a,v in r:\n bkt.back_deco(a)\n \n print r",
"def _fix_up(self, cls, code_name):",
"def dump_proc_self_maps():\n return",
"def _assemble_smali(self, dest, source):\n try:\n subprocess.check_call(['smali', 'a', source, '-o', dest])\n except EnvironmentError:\n self.skipTest('smali not available')",
"def StripPC(addr):\n global ARCH\n if ARCH == \"arm\":\n return addr & ~1\n return addr"
] | [
"0.5797563",
"0.53851175",
"0.5382028",
"0.5153331",
"0.4875175",
"0.48596218",
"0.47921395",
"0.4589166",
"0.45467687",
"0.45267266",
"0.45106924",
"0.44272697",
"0.441203",
"0.43683136",
"0.43274632",
"0.43264234",
"0.43178105",
"0.43031862",
"0.43031862",
"0.42946658",
"0.42870143",
"0.42786124",
"0.42674002",
"0.42580718",
"0.42565688",
"0.42432654",
"0.42379734",
"0.42342746",
"0.42235175",
"0.42225"
] | 0.6551352 | 0 |
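The `__init__` above records the host OS/arch via `platform` and sets up two dictionaries that memoize expensive per-minidump work (tool output and symbol-directory lookups). The sketch below shows that caching pattern in isolation; the `minidump_dump` tool name is an assumption carried over from the attribute names, and in the real symbolizer the binary is located inside the Chromium build directory rather than taken from `PATH`.

```python
import platform
import subprocess

class DumpOutputCache:
    """Illustrative cache keyed by minidump path, as in the two dicts above."""

    def __init__(self):
        self._dump_output = {}  # minidump path -> raw minidump_dump output

    def get_dump_output(self, minidump_path, dump_tool="minidump_dump"):
        # Only shell out once per minidump; later callers get the cached text.
        if minidump_path not in self._dump_output:
            result = subprocess.run([dump_tool, minidump_path],
                                    capture_output=True, text=True)
            self._dump_output[minidump_path] = result.stdout
        return self._dump_output[minidump_path]

# Host OS/arch, as passed to the parent class in __init__:
print(platform.system().lower(), platform.machine())
```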
Returns a list of paths to binaries where symbols may be located. | def GetSymbolBinaries(self, minidump):
libraries = self._ExtractLibraryNamesFromDump(minidump)
symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)
if not symbol_binary_dir:
return []
return [os.path.join(symbol_binary_dir, lib) for lib in libraries] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if path.stem.startswith(\"xnvme_single\"):\n continue\n if path.stem.startswith(\"xnvme_dev\"):\n continue\n if path.stem.startswith(\"xnvme_enum\"):\n continue\n if path.is_file() and path.stat().st_mode & os.X_OK:\n bins.append(path.name)\n\n return bins",
"def _GetDefaultBinPathExcludes(self):\n if sys.platform == \"win32\":\n import cx_Freeze.util\n systemDir = cx_Freeze.util.GetSystemDir()\n windowsDir = cx_Freeze.util.GetWindowsDir()\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\n elif sys.platform == \"darwin\":\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\n else:\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\n \"/usr/lib64\"]",
"def dir_bin():\n return abspath('bin')",
"def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir",
"def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()",
"def buildExecutablesList( path ):\n\n result = []\n for item in os.listdir( path ):\n candidate = path + item\n if not os.path.islink( candidate ):\n continue # Not a symlink at all\n if not os.path.exists( candidate ):\n logging.warning( \"Broken symlink detected: \" + candidate )\n continue # Broken link\n if not os.access( candidate, os.X_OK ):\n logging.warning( \"Symlink to a non-executable file: \" + candidate )\n continue # No permissions to execute\n\n result.append( candidate )\n return result",
"def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts",
"def _binaries_to_symbolize(self):\n raise NotImplementedError()",
"def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path",
"def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path",
"def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()",
"def binaries_path(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def get_symbols(self):\n symbols = os.environ.get('SYMBOLS', 'btc,eth')\n if not symbols:\n return 'btc,eth'\n return symbols",
"def pathext_list():\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)",
"def find_all_pythons():\n \n allpys = []\n \n # split PATH according to platform rules\n pathlist = string.split( os.environ['PATH'], os.pathsep )\n\n # search PATH, excluding nonexistant dirs\n for path in filter( os.path.isdir, pathlist ):\n allpys.extend( find_pythons_in_dir( path ) )\n\n # check the win32 registry, as appropriate\n allpys.extend( get_pythons_from_registry() )\n\n # and of course I'm running under a Python, in case\n # no others were found\n allpys.append( os.path.abspath(sys.executable) )\n \n return allpys",
"def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)",
"def _executable_names(executable):\n\n if os.name == 'nt':\n pathext = os.environ.get('PATHEXT', '').split(os.pathsep)\n for ext in pathext:\n yield executable + ext\n\n else:\n yield executable",
"def binpath(self):\n return self._query_config()['binpath']",
"def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path",
"def build_filelist(basepath):\n log.info(\"Building list of files containing EDM symbols in %s\", basepath)\n symbol_files = []\n for dir_path, _, filenames in os.walk(basepath):\n for filename in filenames:\n filepath = os.path.join(dir_path, filename)\n if filename.endswith(\".opi\") and utils.grep(filepath, \"EDM Symbol\"):\n symbol_files.append(filepath)\n\n return symbol_files",
"def binpath(self):\n return self.__bin",
"def binaries(gmxpath, gmxsuff):\n def gmx_path(binary_path):\n return os.path.join(gmxpath, binary_path + gmxsuff)\n\n if which('gmx_d'):\n logger.debug(\"Using double precision binaries for gromacs\")\n main_binary = gmx_path('gmx_d')\n grompp_bin = [main_binary, 'grompp']\n mdrun_bin = [main_binary, 'mdrun']\n genergy_bin = [main_binary, 'energy']\n elif which('grompp_d') and which('mdrun_d') and which('g_energy_d'):\n logger.debug(\"Using double precision binaries\")\n grompp_bin = [gmx_path('grompp_d')]\n mdrun_bin = [gmx_path('mdrun_d')]\n genergy_bin = [gmx_path('g_energy_d')]\n elif which('gmx'):\n logger.debug(\"Using double precision binaries\")\n main_binary = gmx_path('gmx')\n grompp_bin = [main_binary, 'grompp']\n mdrun_bin = [main_binary, 'mdrun']\n genergy_bin = [main_binary, 'energy']\n elif which('grompp') and which('mdrun') and which('g_energy'):\n logger.debug(\"Using single precision binaries\")\n grompp_bin = [gmx_path('grompp')]\n mdrun_bin = [gmx_path('mdrun')]\n genergy_bin = [gmx_path('g_energy')]\n else:\n raise IOError('Unable to find gromacs executables.')\n return grompp_bin, mdrun_bin, genergy_bin",
"def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res",
"def get_rpaths(pkg):\n rpaths = [pkg.prefix.lib, pkg.prefix.lib64]\n deps = get_rpath_deps(pkg)\n rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))\n rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))\n # Second module is our compiler mod name. We use that to get rpaths from\n # module show output.\n if pkg.compiler.modules and len(pkg.compiler.modules) > 1:\n rpaths.append(path_from_modules([pkg.compiler.modules[1]]))\n return list(dedupe(filter_system_paths(rpaths)))",
"def get_rpaths(dylib_path):\n load_cmds = get_load_commands(dylib_path)\n rpath_cmds = filter(lambda cmd: cmd.name == 'LC_RPATH', load_cmds)\n path_re = re.compile('path (?P<rpath>.*) \\(.*\\)')\n\n rpaths = []\n for cmd in rpath_cmds:\n for line in cmd.lines:\n match = path_re.search(line)\n if match:\n rpaths.append(match.group('rpath'))\n return rpaths",
"def library_search_path(self, pedantic=False):\n return []",
"def path_to_bin_files(path):\r\n files_list=list_of_files(path)\r\n for file in files_list:\r\n asm_lines = parse_data(file)\r\n symbols_dict = init_symbols_dictionary()\r\n collect_symbols_and_ignore_coments(asm_lines, symbols_dict)\r\n bin_lines = translate_to_binary(asm_lines, symbols_dict)\r\n create_output(bin_lines, file)",
"def bin_search(binary):\n if sys.platform == 'win32':\n # Directory containing 'binary' should be in PATH\n return binary\n result = None\n mode = os.R_OK | os.X_OK\n for p in bin_search_path:\n path = join(p, binary)\n if os.access(path, mode) == 1:\n result = path\n break\n else:\n raise MissingBinary('Unable to find binary \"%s\"' % binary)\n return result",
"def _GetDefaultBinIncludes(self):\n if sys.platform == \"win32\":\n pythonDll = \"python%s%s.dll\" % sys.version_info[:2]\n return [pythonDll, \"gdiplus.dll\", \"mfc71.dll\", \"msvcp71.dll\",\n \"msvcr71.dll\"]\n else:\n soName = distutils.sysconfig.get_config_var(\"INSTSONAME\")\n if soName is None:\n return []\n pythonSharedLib = self._RemoveVersionNumbers(soName)\n return [pythonSharedLib]",
"def get_roots(self):\n roots = []\n for symbol in self.GlobalSymbolDict.values():\n if symbol.isRoot():\n roots += [symbol]\n return roots"
] | [
"0.7554683",
"0.65706384",
"0.6247244",
"0.6195482",
"0.61567324",
"0.61457145",
"0.6059669",
"0.60321474",
"0.60022146",
"0.60022146",
"0.59460664",
"0.5926765",
"0.5923688",
"0.5919705",
"0.58894503",
"0.5882842",
"0.5870094",
"0.5855827",
"0.58501244",
"0.5832909",
"0.58105147",
"0.5784442",
"0.5758196",
"0.5745872",
"0.57256746",
"0.5721621",
"0.5702363",
"0.5696622",
"0.56915563",
"0.56731075"
] | 0.7863217 | 0 |
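`GetSymbolBinaries` joins each library name extracted from the dump onto the chosen symbol directory. A minimal sketch of that step, with an extra existence check and made-up paths for illustration:

```python
import os

def symbol_binaries(symbol_dir, library_names):
    """Join each library name onto the symbol directory, keeping real files."""
    if not symbol_dir:
        return []
    paths = [os.path.join(symbol_dir, lib) for lib in library_names]
    return [p for p in paths if os.path.exists(p)]

# Hypothetical usage with an invented unstripped-library directory:
print(symbol_binaries("out/Release/lib.unstripped", ["libmonochrome.so"]))
```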
Extracts library names that may contain symbols from the minidump. This is a duplicate of the logic in Chromium's //build/android/stacktrace/crashpad_stackwalker.py. | def _ExtractLibraryNamesFromDump(self, minidump):
default_library_name = 'libmonochrome.so'
minidump_dump_output = self._GetMinidumpDumpOutput(minidump)
if not minidump_dump_output:
logging.warning(
'Could not get minidump_dump output, defaulting to library %s',
default_library_name)
return [default_library_name]
library_names = []
module_library_line_re = re.compile(r'[(]code_file[)]\s+= '
r'"(?P<library_name>lib[^. ]+.so)"')
in_module = False
for line in minidump_dump_output.splitlines():
line = line.lstrip().rstrip('\n')
if line == 'MDRawModule':
in_module = True
continue
if line == '':
in_module = False
continue
if in_module:
m = module_library_line_re.match(line)
if m:
library_names.append(m.group('library_name'))
if not library_names:
logging.warning(
'Could not find any library name in the dump, '
'default to: %s', default_library_name)
return [default_library_name]
return library_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]",
"def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret",
"def extract_symbols(lib_file, static_lib=None):\n if static_lib is None:\n static_lib = is_static_library(lib_file)\n if sys.platform.startswith('aix'):\n extractor = AIXDumpExtractor(static_lib=static_lib)\n elif ReadElfExtractor.find_tool() and not static_lib:\n extractor = ReadElfExtractor(static_lib=static_lib)\n else:\n extractor = NMExtractor(static_lib=static_lib)\n return extractor.extract(lib_file)",
"def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir",
"def extract_functions(elf_path):\n text_data = objdump_section(elf_path, '.text')\n name_to_addr = parse_func_names(text_data)\n return name_to_addr",
"def __init__(self, dump_finder, build_dir, symbols_dir=None):\n # Map from minidump path (string) to minidump_dump output (string).\n self._minidump_dump_output = {}\n # Map from minidump path (string) to the directory that should be used when\n # looking for symbol binaries (string).\n self._minidump_symbol_binaries_directories = {}\n # We use the OS/arch of the host, not the device.\n super(AndroidMinidumpSymbolizer, self).__init__(\n platform.system().lower(), platform.machine(), dump_finder, build_dir,\n symbols_dir=symbols_dir)",
"def _ExtractLibraryLoadAddressesFromLogcat(logs):\n browser_libs = LibraryLoadMap()\n renderer_libs = LibraryLoadMap()\n for m in re_library_address.finditer(logs):\n process_type, lib_name, lib_address = m.groups()\n lib_address = int(lib_address, 16)\n if process_type == 'BROWSER':\n browser_libs[lib_name] = lib_address\n elif process_type == 'RENDERER':\n renderer_libs[lib_name] = lib_address\n else:\n assert False, 'Invalid process type'\n\n return browser_libs, renderer_libs",
"def get_libraries_names():\n rpm_packages_path = path.join(PMDK_PATH, 'rpm', SYSTEM_ARCHITECTURE)\n libraries_names = [elem.split('-')[0] for elem in listdir(rpm_packages_path)\n if PMDK_VERSION in elem]\n return set(libraries_names)",
"def _LoadGlobalSymbolsFromDump(dump_obj):\n symbols = set()\n for key in (\"elf_functions\", \"elf_objects\"):\n symbols.update(\n symbol.get(\"name\", \"\") for symbol in dump_obj.get(key, []) if\n symbol.get(\"binding\", \"global\") == \"global\")\n return symbols",
"def get_memory_tool_labels(stacktrace):\n # Remove stack frames and paths to source code files. This helps to avoid\n # confusion when function names or source paths contain a memory tool token.\n data = ''\n for line in stacktrace.split('\\n'):\n if STACKFRAME_LINE_REGEX.match(line):\n continue\n data += line + '\\n'\n\n labels = [t['label'] for t in MEMORY_TOOLS_LABELS if t['token'] in data]\n return labels",
"def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def _GetSymbolNameToFilename(build_directory):\n symbol_extractor.CheckLlvmNmExists()\n path = os.path.join(build_directory, 'obj')\n object_filenames = cyglog_to_orderfile.GetObjectFilenames(path)\n pool = multiprocessing.Pool()\n symbol_names_filename = zip(\n pool.map(symbol_extractor.SymbolNamesFromLlvmBitcodeFile,\n object_filenames),\n object_filenames)\n pool.close()\n result = {}\n for (symbol_names, filename) in symbol_names_filename:\n stripped_filename = filename[len(build_directory):]\n if stripped_filename.startswith('/obj/'):\n stripped_filename = stripped_filename[len('/obj/'):]\n for s in symbol_names:\n result[s] = stripped_filename\n return result",
"def get_symbols(doc, lib):\n\n basename = lib.replace(\".dll\", \"\").lower()\n filename = os.path.join(get_hopper_script_dir(), basename + \".txt\")\n if not os.path.exists(filename):\n doc.log(\"Symbol file not found: %s\" % filename)\n return None\n\n symbols = {}\n with open(filename, \"r\") as fp:\n for i, line in enumerate(fp, 1):\n match = symbol_line.match(line)\n if not match:\n doc.log(\"Skipping line %d: Malformed\" % i)\n continue\n\n ordinal, name = match.group(1), match.group(2)\n if ordinal and name:\n symbols[ordinal] = name\n\n return symbols",
"def extract(self, lib):\n cmd = [self.nm_exe] + self.flags + [lib]\n out = subprocess.check_output(cmd).decode()\n fmt_syms = (self._extract_sym(l)\n for l in out.splitlines() if l.strip())\n # Cast symbol to string.\n final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))\n # Make unique and sort strings.\n tmp_list = list(sorted(set(final_syms)))\n # Cast string back to symbol.\n return util.read_syms_from_list(tmp_list)",
"def _DiffElfSymbols(self, dump_obj, parser):\n dump_symbols = self._LoadGlobalSymbolsFromDump(dump_obj)\n lib_symbols = parser.ListGlobalDynamicSymbols(include_weak=True)\n return sorted(dump_symbols.difference(lib_symbols))",
"def decode_traceback(traceback_str, in_binfile=\"\", search_dirs=[]):\n print (\"Decoding: \" + traceback_str + \"\\n\")\n tb_str = \"Traceback=\"\n start = traceback_str.find(tb_str)\n if start < 0:\n print (\"Error: Unexpected Traceback string: it must contain '\" + tb_str + \"'\")\n return \"Error: Failed to decode\"\n tokens = traceback_str[(start + len(tb_str)):].split()\n if len(tokens) < 1:\n print (\"Error: Unexpected Traceback string: too short\")\n return \"Error: Failed to decode\"\n # the first token must be the binary that generates the traceback\n binfile = find_binfile(in_binfile, tokens[0], search_dirs)\n verbose(\"The main binary file is: \" + str(binfile), LEVEL_0)\n shlib_db = dict()\n if args.useldd and binfile and os.path.exists(binfile):\n # only if -u command option is specified\n shlib_db = get_all_shlib_paths_via_ldd(binfile)\n verbose(\"The shared library database from ldd is: \" + str(shlib_db), LEVEL_2)\n decode_result = \"\"\n for token in tokens:\n if '+' in token:\n result = get_addr2line(token, binfile, search_dirs, shlib_db)\n verbose(result, LEVEL_0)\n decode_result += result + \"\\n\"\n return decode_result",
"def testStacktraceParsing(self, mock_get_dependency):\n mock_get_dependency.return_value = {\n 'chrome': Dependency('chrome', 'https://repo', 'rev1')\n }\n\n uma_data = self._GetDummyUMAData()\n actual_stack_trace = uma_data.stacktrace\n\n stack_frame0 = stacktrace.ProfilerStackFrame(\n 0, 0.1, float('inf'), False, 'chrome', 'wWinMain',\n 'app/chrome_exe_main_win.cc', 'chrome/app/chrome_exe_main_win.cc',\n 'https://repo', 484,\n (stacktrace.FunctionLine(line=490, sample_fraction=0.7),\n stacktrace.FunctionLine(line=511, sample_fraction=0.3)),\n (stacktrace.FunctionLine(line=490, sample_fraction=0.9),\n stacktrace.FunctionLine(line=511, sample_fraction=0.1)))\n stack_frame1 = stacktrace.ProfilerStackFrame(\n 1, 0.2, 6.1, False, 'chrome', 'MainDllLoader::Launch(HINSTANCE__ *)',\n 'app/main_dll_loader_win.cc', 'chrome/app/main_dll_loader_win.cc',\n 'https://repo', 117, None)\n frames0 = (stack_frame0, stack_frame1)\n\n stack_frame2 = stacktrace.ProfilerStackFrame(\n 0, 0.3, float('inf'), False, 'chrome', 'wWinMain',\n 'app/chrome_exe_main_win.cc', 'chrome/app/chrome_exe_main_win.cc',\n 'https://repo', 484, None)\n frames1 = (stack_frame2,)\n\n call_stack0 = stacktrace.CallStack(0, frames0,\n stacktrace.CallStackFormatType.DEFAULT,\n stacktrace.LanguageType.CPP)\n call_stack1 = stacktrace.CallStack(0, frames1,\n stacktrace.CallStackFormatType.DEFAULT,\n stacktrace.LanguageType.CPP)\n stacks = (call_stack0, call_stack1)\n expected_stacktrace = stacktrace.Stacktrace(stacks, call_stack0)\n\n self._VerifyTwoStackFramesEqual(actual_stack_trace.stacks[0].frames[0],\n stack_frame0)\n self._VerifyTwoStacktracesEqual(actual_stack_trace, expected_stacktrace)",
"def _getImports_ldd(pth):\n rslt = set()\n if is_aix:\n # Match libs of the form\n # 'archivelib.a(objectmember.so/.o)'\n # or\n # 'sharedlib.so'\n # Will not match the fake lib '/unix'\n lddPattern = re.compile(r\"^\\s*(((?P<libarchive>(.*\\.a))(?P<objectmember>\\(.*\\)))|((?P<libshared>(.*\\.so))))$\")\n elif is_hpux:\n # Match libs of the form\n # 'sharedlib.so => full-path-to-lib\n # e.g.\n # 'libpython2.7.so => /usr/local/lib/hpux32/libpython2.7.so'\n lddPattern = re.compile(r\"^\\s+(.*)\\s+=>\\s+(.*)$\")\n elif is_solar:\n # Match libs of the form\n # 'sharedlib.so => full-path-to-lib\n # e.g.\n # 'libpython2.7.so.1.0 => /usr/local/lib/libpython2.7.so.1.0'\n # Will not match the platform specific libs starting with '/platform'\n lddPattern = re.compile(r\"^\\s+(.*)\\s+=>\\s+(.*)$\")\n else:\n lddPattern = re.compile(r\"\\s*(.*?)\\s+=>\\s+(.*?)\\s+\\(.*\\)\")\n\n for line in compat.exec_command('ldd', pth).splitlines():\n m = lddPattern.search(line)\n if m:\n if is_aix:\n libarchive = m.group('libarchive')\n if libarchive:\n # We matched an archive lib with a request for a particular\n # embedded shared object.\n # 'archivelib.a(objectmember.so/.o)'\n lib = libarchive\n name = os.path.basename(lib) + m.group('objectmember')\n else:\n # We matched a stand-alone shared library.\n # 'sharedlib.so'\n lib = m.group('libshared')\n name = os.path.basename(lib)\n elif is_hpux:\n name, lib = m.group(1), m.group(2)\n else:\n name, lib = m.group(1), m.group(2)\n if name[:10] in ('linux-gate', 'linux-vdso'):\n # linux-gate is a fake library which does not exist and\n # should be ignored. See also:\n # http://www.trilithium.com/johan/2005/08/linux-gate/\n continue\n\n if os.path.exists(lib):\n # Add lib if it is not already found.\n if lib not in rslt:\n rslt.add(lib)\n else:\n logger.error('Can not find %s in path %s (needed by %s)',\n name, lib, pth)\n return rslt",
"def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def DumpSymbols(lib_path, dump_path):\n elf_parser = ExternalModules.elf_parser\n parser = None\n try:\n parser = elf_parser.ElfParser(lib_path)\n symbols = parser.ListGlobalDynamicSymbols()\n finally:\n if parser:\n parser.Close()\n if not symbols:\n return \"No symbols\"\n symbols.sort()\n with open(dump_path, \"w\") as dump_file:\n dump_file.write(\"\\n\".join(symbols) + \"\\n\")\n return \"Output: \" + dump_path",
"def get_dlls(comments):\n dlls = [line for line in comments if '.dll' in line.lower()]\n return list(set(line.split()[-1].lower() for line in dlls))",
"def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()",
"def _filter_stack(\n stack: List[traceback.FrameSummary],\n) -> List[traceback.FrameSummary]:\n # strip the task factory frame in the vanilla event loop\n if (\n stack[-1].filename.endswith(\"asyncio/base_events.py\")\n and stack[-1].name == \"create_task\"\n ):\n stack = stack[:-1]\n # strip the loop.create_task frame\n if (\n stack[-1].filename.endswith(\"asyncio/tasks.py\")\n and stack[-1].name == \"create_task\"\n ):\n stack = stack[:-1]\n _cut_idx = 0\n for _cut_idx, f in reversed(list(enumerate(stack))):\n # uvloop\n if f.filename.endswith(\"asyncio/runners.py\") and f.name == \"run\":\n break\n # vanilla\n if f.filename.endswith(\"asyncio/events.py\") and f.name == \"_run\":\n break\n return stack[_cut_idx + 1 :]",
"def _extract_r_remove_package_names(log):\n start = \"remove.packages(c(\"\n i_start = log.find(start) + len(start)\n i_end = log.find(\")\", i_start)\n package_names = [name.strip('\"') for name in log[i_start:i_end].split(\",\")]\n return package_names",
"def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def extract_function_name(maybe_function_str: str) -> Optional[str]:\n match = STACK_TRACE_LINE_RE.search(maybe_function_str)\n if match is not None:\n return match.group(2)\n return None",
"def analyze_source(source, prefix):\n symbolre = re.compile(r\"[\\s:;&\\|\\<\\>\\=\\^\\+\\-\\*/\\(\\)\\?]\" + prefix + \"([a-zA-Z0-9_]+)[\\s\\(]\")\n results = symbolre.findall(source, re.MULTILINE)\n ret = set()\n for ii in results:\n symbolset = set()\n symbolset.add(ii)\n ret = ret.union(symbolset)\n return ret",
"def get_search_keywords(testcase):\n crash_state_lines = testcase.crash_state.splitlines()\n # Use top 2 frames for searching.\n return crash_state_lines[:2]",
"def fix_mangled_includes(ln):\n m = proginput_re.search(ln)\n if m:\n fn = m.group(1)\n dName = os.path.dirname(fn)\n fName = os.path.basename(fn)\n for fn2 in os.listdir(dName):\n if fn2.replace(\"_\",\"\") == fName:\n ln = \"\\\\programinput{\" + os.path.join(dName,fn2) + \"}\\n\"\n break\n return ln",
"def get_paths_containing_string_in_threadstack(target_str, stack_context=2):\n frames = []\n list_of_frames = [\n get_paths_containing_string_in_locals(target_str,\n val.frame.f_locals,\n locals_dict_ref_str=\"[get_paths_containing_string_in_locals(target_str, val.frame.f_locals) for idx, \"\n \"val in enumerate(inspect.getouterframes(inspect.currentframe(), \"\n \"2)) if \\\"pydev\\\" not in val.filename]\")\n for idx, val in enumerate(inspect.getouterframes(inspect.currentframe(), stack_context))\n if \"pydev\" not in val.filename]\n\n for x in list_of_frames:\n for y in x:\n frames.append(y)\n return frames"
] | [
"0.606551",
"0.5674643",
"0.56701034",
"0.5526038",
"0.5405917",
"0.5370439",
"0.5335965",
"0.52976125",
"0.52932",
"0.52613264",
"0.523408",
"0.52253866",
"0.5208155",
"0.51849467",
"0.51835585",
"0.5180916",
"0.51393825",
"0.51383364",
"0.513191",
"0.5112789",
"0.5104473",
"0.50578815",
"0.50216293",
"0.50041467",
"0.5003027",
"0.49749237",
"0.49329785",
"0.49150857",
"0.4866886",
"0.48564276"
] | 0.74637324 | 0 |
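The negatives above revolve around extracting symbol names from binaries and dumps (nm, readelf, minidump tooling). As an illustrative sketch only, with the nm flag choice and helper name being my own assumptions rather than anything taken from these samples, listing dynamic symbols could look like this:

import subprocess

def list_dynamic_symbols(lib_path, nm_exe='nm'):
    # nm -D prints the dynamic symbol table; keep only the symbol names.
    out = subprocess.check_output([nm_exe, '-D', lib_path]).decode()
    names = set()
    for line in out.splitlines():
        parts = line.split()
        if parts:
            names.add(parts[-1])
    return sorted(names)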
Gets the directory that should contain symbol binaries for |minidump|. | def _GetSymbolBinaryDirectory(self, minidump, libraries):
if minidump in self._minidump_symbol_binaries_directories:
return self._minidump_symbol_binaries_directories[minidump]
# Get the processor architecture reported by the minidump.
arch = None
matcher = re.compile(_PROCESSOR_ARCH_REGEX)
for line in self._GetMinidumpDumpOutput(minidump).splitlines():
match = matcher.match(line)
if match:
arch = match.groupdict()['arch'].lower()
break
if not arch:
logging.error('Unable to find processor architecture for minidump %s',
minidump)
self._minidump_symbol_binaries_directories[minidump] = None
return None
if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:
logging.error(
'Unsupported processor architecture %s for minidump %s. This is '
'likely fixable by adding the correct mapping for the architecture '
'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',
arch, minidump)
self._minidump_symbol_binaries_directories[minidump] = None
return None
# Look for a directory that contains binaries with the correct architecture.
matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])
symbol_dir = None
for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:
possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)
if not os.path.exists(possible_symbol_dir):
continue
for f in os.listdir(possible_symbol_dir):
if f not in libraries:
continue
binary_path = os.path.join(possible_symbol_dir, f)
stdout = subprocess.check_output(
['file', binary_path], stderr=subprocess.STDOUT)
if matcher.match(stdout):
symbol_dir = possible_symbol_dir
break
if not symbol_dir:
logging.error(
        'Unable to find suitable symbol binary directory for architecture %s. '
'This is likely fixable by adding the correct directory to '
'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',
arch)
self._minidump_symbol_binaries_directories[minidump] = symbol_dir
return symbol_dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))",
"def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]",
"def dir_bin():\n return abspath('bin')",
"def GetBinDirectory(self, *args):\n return _gmat_py.FileManager_GetBinDirectory(self, *args)",
"def _getCodeFolder(self):\n if getattr(sys, 'frozen', False):\n # we are running in a bundle (frozen)\n bundle_dir = sys._MEIPASS\n else:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\n return bundle_dir",
"def binpath(self):\n return self.__bin",
"def GetPackageDirectory():\n return os.path.dirname(__file__)",
"def getDebugDirectory(self) -> ghidra.app.util.bin.format.pe.debug.DebugDirectory:\n ...",
"def get_crash_dumps_path(self):\n\t\treturn call_sdk_function('PrlApi_GetCrashDumpsPath')",
"def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb",
"def binpath(self):\n return self._query_config()['binpath']",
"def find_tool():\n return shutil.which('dump')",
"def get_condor_bin_dir(config):\n condor_root = config['condor-root']\n if condor_root:\n return osp.join(condor_root, 'bin')\n else:\n return ''",
"def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))",
"def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)",
"def get_bin_path(self, filename):\n pg_config = get_config()\n if pg_config is None or \"BINDIR\" not in pg_config:\n return filename\n else:\n return os.path.join(pg_config.get(\"BINDIR\"), filename)",
"def output_dir():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n outpath = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n return outpath",
"def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path",
"def mapto_dir(self):\r\n return os.path.join(self._work_dir, 'mapped-jars')",
"def __GetGenModuleDir(cls, src):\n return os.path.dirname(src.replace(FileUtils.GetSrcRoot(), cls.GetSwigOutDir()))",
"def bin_root(self):\n return os.path.join(self.build_dir, self.build, \"stage0\")",
"def get_debug_directory(self):\n \n try:\n data_dir = self.debug_dir()\n except ValueError, why:\n return obj.NoneObject(str(why))\n\n return obj.Object(\"_IMAGE_DEBUG_DIRECTORY\", \n offset = self.DllBase + data_dir.VirtualAddress, \n vm = self.obj_native_vm)",
"def get_package_dir():\n return Path(__file__).parent",
"def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if path.stem.startswith(\"xnvme_single\"):\n continue\n if path.stem.startswith(\"xnvme_dev\"):\n continue\n if path.stem.startswith(\"xnvme_enum\"):\n continue\n if path.is_file() and path.stat().st_mode & os.X_OK:\n bins.append(path.name)\n\n return bins",
"def getBinary():\n binary = shutil.which(_ROCKETLOGGER_BINARY)\n\n if not os.path.exists(binary):\n raise FileNotFoundError(f\"Could not find RocketLogger CLI binary! [{binary}]\")\n return os.path.abspath(binary)",
"def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)",
"def get_bitstream_path():\n\n env = get_env()\n\n # Derive destination path\n cache_dir = os.getenv(\"VTA_CACHE_PATH\", os.path.join(os.getenv(\"HOME\"), \".vta_cache/\"))\n cache_dir = os.path.join(cache_dir, env.TARGET)\n cache_dir = os.path.join(cache_dir, env.HW_VER.replace(\".\", \"_\"))\n # Create the directory if it didn't exist\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n bit_path = os.path.join(cache_dir, env.BITSTREAM) + \".bit\"\n\n return bit_path",
"def get_binary_name():\n return os.path.basename(inspect.stack()[-1][1])[:16]",
"def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")",
"def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\""
] | [
"0.6687989",
"0.668011",
"0.6666962",
"0.6321019",
"0.63035196",
"0.6107489",
"0.6062449",
"0.6049191",
"0.6031628",
"0.6012374",
"0.5988652",
"0.5959031",
"0.5932072",
"0.5915141",
"0.59118515",
"0.59096825",
"0.5886469",
"0.5836009",
"0.58122456",
"0.58107346",
"0.58003944",
"0.57813776",
"0.57147396",
"0.5705924",
"0.5696492",
"0.5696148",
"0.569374",
"0.5686065",
"0.5680613",
"0.56766015"
] | 0.7879615 | 0 |
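The document above walks candidate build subdirectories and keeps the first one whose libraries match the architecture reported by the minidump, judged from the output of the file command. A minimal standalone sketch of that idea follows; the regex values and the find_symbol_dir name are illustrative assumptions, not the real _BREAKPAD_ARCH_TO_FILE_REGEX or _POSSIBLE_SYMBOL_BINARY_DIRECTORIES contents:

import os
import re
import subprocess

# Hypothetical mapping from minidump arch names to `file` output patterns.
_ARCH_TO_FILE_REGEX = {
    'arm': r'.*32-bit.*ARM.*',
    'arm64': r'.*64-bit.*ARM.*',
    'x86': r'.*32-bit.*Intel.*',
    'x86_64': r'.*64-bit.*x86-64.*',
}

def find_symbol_dir(build_dir, candidate_subdirs, libraries, arch):
    # Return the first candidate directory whose libraries match the arch.
    matcher = re.compile(_ARCH_TO_FILE_REGEX[arch])
    for subdir in candidate_subdirs:
        candidate = os.path.join(build_dir, subdir)
        if not os.path.isdir(candidate):
            continue
        for name in libraries:
            path = os.path.join(candidate, name)
            if not os.path.exists(path):
                continue
            output = subprocess.check_output(['file', path]).decode()
            if matcher.match(output):
                return candidate
    return None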
Runs minidump_dump on the given minidump. Caches the result for reuse. | def _GetMinidumpDumpOutput(self, minidump):
if minidump in self._minidump_dump_output:
logging.debug('Returning cached minidump_dump output for %s', minidump)
return self._minidump_dump_output[minidump]
dumper_path = local_first_binary_manager.GetInstance().FetchPath(
'minidump_dump')
if not os.access(dumper_path, os.X_OK):
logging.warning('Cannot run minidump_dump because %s is not found.',
dumper_path)
return None
# Using subprocess.check_output with stdout/stderr mixed can result in
# errors due to log messages showing up in the minidump_dump output. So,
# use Popen and combine into a single string afterwards.
p = subprocess.Popen(
[dumper_path, minidump], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stdout = stdout + '\n' + stderr
if p.returncode != 0:
# Dumper errors often do not affect stack walkability, just a warning.
# It's possible for the same stack to be symbolized multiple times, so
# add a timestamp suffix to prevent artifact collisions.
now = datetime.datetime.now()
suffix = now.strftime('%Y-%m-%d-%H-%M-%S')
artifact_name = 'dumper_errors/%s-%s' % (
os.path.basename(minidump), suffix)
logging.warning(
'Reading minidump failed, but likely not actually an issue. Saving '
'output to artifact %s', artifact_name)
artifact_logger.CreateArtifact(artifact_name, stdout)
if stdout:
self._minidump_dump_output[minidump] = stdout
return stdout | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testPullMinidumps(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n time_offset = platform_backend.GetDeviceHostClockOffset()\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n # Android's implementation of \"touch\" doesn't support setting time via\n # Unix timestamps, only via dates, which are affected by timezones. So,\n # figure out what the device's timestamp for January 2nd, 1970 is and use\n # that to calculate the expected local timestamp. January 2nd is used\n # instead of January 1st so that we can't get accidentally get a negative\n # timestamp if the host-device clock offset is negative.\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n device_mtime = self._browser_backend.device.RunShellCommand(\n ['stat', '-c', '%Y', remote_dump_file], single_line=True)\n device_mtime = int(device_mtime.strip())\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n self.assertEqual(os.path.getmtime(local_path), device_mtime - time_offset)",
"def dumbcache_dump(self, cache_dir=r'data\\cache'):\n obj = self\n\n DUMBCACHE = os.path.join(r'..', cache_dir, r'br_store.dmp')\n with open(DUMBCACHE, 'wb') as f:\n pkl.dump(obj, f)",
"def testPullMinidumpsLockedFilesIgnored(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n remote_locked_dump_file = posixpath.join(remote_path, 'locked_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_locked_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_locked_dump_file + '.lock'])\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'locked_dump')\n self.assertFalse(os.path.exists(local_path))",
"def disasm_dump(bin, addr):\n return cache.access((bin,addr), lambda x: disasm_work(*x))",
"def testPullMinidumpsOnlyNew(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n local_old_dump_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'old_dump')\n with open(local_old_dump_path, 'w'):\n pass\n old_dump_time = os.stat(local_old_dump_path).st_mtime\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = posixpath.join(remote_path, 'new_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n remote_dump_file = posixpath.join(remote_path, 'old_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_new_dump_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'new_dump')\n self.assertTrue(os.path.exists(local_new_dump_path))\n self.assertTrue(os.path.exists(local_old_dump_path))\n # A changed mtime would mean that the dump was re-pulled\n self.assertEqual(os.stat(local_old_dump_path).st_mtime, old_dump_time)",
"def test_dump_calls_pg_dump(mocker):\n\tmocker.patch('subprocess.Popen')\n\tassert pgdump.dump(url)\n\tsubprocess.Popen.assert_called_with(['pg_dump', url], stdout=subprocess.PIPE)",
"def PullDumps(self, host_dir):\n # The device/emulator's clock might be off from the host, so calculate an\n # offset that can be added to the host time to get the corresponding device\n # time.\n # The offset is (device_time - host_time), so a positive value means that\n # the device clock is ahead.\n time_offset = self.GetDeviceHostClockOffset()\n\n stdout, _ = self.RunCmdOnDevice([\n 'ls', '-1',\n cmd_helper.SingleQuote(self.ExpandUser(self.MINIDUMP_DIR))\n ])\n device_dumps = stdout.splitlines()\n for dump_filename in device_dumps:\n host_path = os.path.join(host_dir, dump_filename)\n # Skip any ignored files since they're not useful and could be deleted by\n # the time we try to pull them.\n if _IsIgnoredFileType(dump_filename):\n continue\n if os.path.exists(host_path):\n continue\n device_path = cmd_helper.SingleQuote(\n posixpath.join(self.MINIDUMP_DIR, dump_filename))\n\n # Skip any directories that happen to be in the list.\n if self.IsDir(device_path):\n continue\n\n # Skip any files that have a corresponding .lock file, as that implies the\n # file hasn't been fully written to disk yet.\n device_lock_path = cmd_helper.SingleQuote(\n posixpath.join(self.MINIDUMP_DIR, dump_filename + '.lock'))\n if self.FileExistsOnDevice(device_lock_path):\n logging.debug('Not pulling file %s because a .lock file exists for it',\n device_path)\n continue\n try:\n self.GetFile(device_path, host_path)\n except Exception as e: # pylint: disable=broad-except\n logging.error('Failed to get file %s: %s', device_path, e)\n continue\n # Set the local version's modification time to the device's.\n stdout, _ = self.RunCmdOnDevice(\n ['ls', '--time-style', '+%s', '-l', device_path])\n stdout = stdout.strip()\n # We expect whitespace-separated fields in this order:\n # mode, links, owner, group, size, mtime, filename.\n # Offset by the difference of the device and host clocks.\n device_mtime = int(stdout.split()[5])\n host_mtime = device_mtime - time_offset\n os.utime(host_path, (host_mtime, host_mtime))",
"def clean_dump(self, dump_filename):\n # Read dump file generated by mysqldump\n with open(dump_filename, \"r\") as dump_file:\n dump_text = regroup_inserts(dump_file.read())\n\n with open(dump_filename, \"w\") as dump_file:\n # add the use database statement at the beginning\n dump_file.write(\"USE {};\\n\".format(self.database_name))\n\n # overwrite the dump with the new single statement version\n dump_file.write(dump_text)",
"def run_dump(self, expanded, unexpanded) : \n\t\tif len(expanded) < 2 :\n\t\t\treturn self.errormessage(\"Needs at least a destination directory and one object id to dump\")\n\t\tdestination = os.path.normpath(os.path.expanduser(expanded[0])) # in case there's a ~username\n\t\tif not os.path.isdir(destination) :\n\t\t\treturn self.errormessage(\"%s is not a directory\" % destination)\n\t\tstatus = 0\n\t\tfor arg in expanded[1:] :\n\t\t\tobject = self.toObject(self.__context, arg)\n\t\t\tif object is None :\n\t\t\t\tstatus = status + self.errormessage(\"Object %s doesn't exist\" % arg)\n\t\t\telif not self.HasPerms(object, 'View management screens') :\n\t\t\t\tstatus = status - 1\n\t\t\telif not hasattr(object, \"document_src\") or not callable(object.document_src) :\n\t\t\t\tstatus = status + self.errormessage(\"Doesn't know how to dump object %s\" % arg)\n\t\t\telse :\n\t\t\t\tfname = os.path.join(destination, object.getId())\n\t\t\t\ttry :\n\t\t\t\t\tfout = open(fname, \"wb\")\n\t\t\t\t\tfout.write(object.document_src())\n\t\t\t\t\tfout.close()\n\t\t\t\t\tself.htmlmessage(\"Object %s dumped to server as %s\" % (self.ObjectPath(object), fname))\n\t\t\t\texcept IOError, msg :\n\t\t\t\t\tstatus = status + self.errormessage('Error %s, occured while dumping %s' % (msg, arg))\n\t\treturn status",
"def testPullMinidumpsLockFilesIgnored(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n remote_lock_file = posixpath.join(remote_path, 'test_file.lock')\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_lock_file])\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_file.lock')\n self.assertFalse(os.path.exists(local_path))",
"def do_disassemble(self, args):\n if len(args) != 0:\n args = args.split(' ')\n self.u_start = self.ParseAddressExpr(args[0])\n self.u_size = self.ParseAddressExpr(args[1]) if len(args) > 1 else 0x20\n skip = False\n else:\n # Skip the first instruction if we reuse the last address.\n skip = True\n\n if not self.reader.IsValidAddress(self.u_start):\n print(\"Address %s is not contained within the minidump!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n return\n lines = self.reader.GetDisasmLines(self.u_start, self.u_size)\n if len(lines) == 0:\n print(\"Address %s could not be disassembled!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n print(\" Could not disassemble using %s.\" % OBJDUMP_BIN)\n print(\" Pass path to architecture specific objdump via --objdump?\")\n return\n for line in lines:\n if skip:\n skip = False\n continue\n print(FormatDisasmLine(self.u_start, self.heap, line))\n # Set the next start address = last line\n self.u_start += lines[-1][0]\n print()",
"def split_debug(src, objcopy=None, objdump=None):\n if objcopy is None:\n objcopy = \"objcopy\"\n if objdump is None:\n objdump = \"objdump\"\n if not contains_debug_info(src, objdump=objdump):\n ui.info(\"-- Already stripped\", src)\n return\n src_stat = os.stat(src)\n dirname, basename = os.path.split(src)\n debug_dir = os.path.join(dirname, \".debug\")\n qisys.sh.mkdir(debug_dir)\n dest = os.path.join(src, debug_dir, basename)\n to_run = list()\n to_run.append([objcopy, \"--only-keep-debug\", src, dest])\n to_run.append([objcopy,\n \"--strip-debug\",\n \"--strip-unneeded\",\n \"--add-gnu-debuglink=%s\" % dest,\n src])\n try:\n for cmd in to_run:\n qisys.command.check_output(cmd, stderr=subprocess.STDOUT)\n ui.info(\"-- Debug info extracted for\", src)\n except qisys.command.CommandFailedException as e:\n ui.error(\"Error while Extracting package debug for %s\" % src)\n ui.error(str(e))\n # After the commands have run, utime of the file has changed, causing\n # cmake to re-install the libraries. Which is not cool ...\n # So set back mtime to its previous value:\n os.utime(src, (src_stat.st_atime, src_stat.st_mtime))",
"def test_execute_dump_site_transaction(self):\n\n instruction = Instruction(\"dump(3)\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n self.assertEqual(output, \"{'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}\")",
"def backup_dump(self):\n errors = Queue.Queue()\n threads = []\n for host in self.shards:\n t = threading.Thread(target=host.mongodump, args=(errors,))\n threads.append(t)\n if self.config_server is not None:\n t = threading.Thread(target=self.config_server.mongodump, args=(errors,))\n threads.append(t)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n if not errors.empty():\n # We don't really care for all errors, so just through the first one\n raise Exception(errors.get())",
"def clean_dump(filename):\n from .utils import clean_dump as clean\n click.echo('Starting to cleaning {}'.format(filename))\n clean(filename)\n click.echo('Finished cleaning {}'.format(filename))",
"def PullDumps(self, host_dir):\n # The device/emulator's clock might be off from the host, so calculate an\n # offset that can be added to the host time to get the corresponding device\n # time.\n # The offset is (device_time - host_time), so a positive value means that\n # the device clock is ahead.\n time_offset = self.GetDeviceHostClockOffset()\n\n stdout, _ = self.RunCmdOnDevice(\n ['ls', '-1', cmd_helper.SingleQuote(self.CROS_MINIDUMP_DIR)])\n device_dumps = stdout.splitlines()\n for dump_filename in device_dumps:\n host_path = os.path.join(host_dir, dump_filename)\n # Skip any ignored files since they're not useful and could be deleted by\n # the time we try to pull them.\n if _IsIgnoredFileType(dump_filename):\n continue\n if os.path.exists(host_path):\n continue\n device_path = cmd_helper.SingleQuote(\n posixpath.join(self.CROS_MINIDUMP_DIR, dump_filename))\n # Skip any directories that happen to be in the list.\n stdout, _ = self.RunCmdOnDevice(['test', '-f', device_path, '&&',\n 'echo', 'true', '||', 'echo', 'false'])\n if 'false' in stdout:\n continue\n # Skip any files that have a corresponding .lock file, as that implies the\n # file hasn't been fully written to disk yet.\n device_lock_path = device_path + '.lock'\n if self.FileExistsOnDevice(device_lock_path):\n logging.debug('Not pulling file %s because a .lock file exists for it',\n device_path)\n continue\n try:\n self.GetFile(device_path, host_path)\n except Exception as e: # pylint: disable=broad-except\n logging.error('Failed to get file %s: %s', device_path, e)\n continue\n # Set the local version's modification time to the device's.\n stdout, _ = self.RunCmdOnDevice(\n ['ls', '--time-style', '+%s', '-l', device_path])\n stdout = stdout.strip()\n # We expect whitespace-separated fields in this order:\n # mode, links, owner, group, size, mtime, filename.\n # Offset by the difference of the device and host clocks.\n device_mtime = int(stdout.split()[5])\n host_mtime = device_mtime - time_offset\n os.utime(host_path, (host_mtime, host_mtime))",
"def dump(self):\n self.dumpInit.emit()\n\n worker = DumpThread()\n thread = QtCore.QThread(self)\n self.__thread_maps['dump'] = [thread, worker]\n worker.moveToThread(thread)\n\n worker.dumpSig.connect(self.dumpSig)\n worker.dumpDone.connect(self.dumpDone)\n thread.started.connect(worker.dump)\n\n thread.start()",
"def test_dump_call_pgdump(mocker):\n mocker.patch('subprocess.Popen')\n assert pgdump.dump(url)\n subprocess.Popen.assert_called_with(['pg_dump' , url] , stdout=subprocess.PIPE)",
"def do_before_dump(self):\n self.checksummer.prepare_checksums()",
"def dump(self, dump_path: str):\n if self.parsed_data is None:\n raise ValueError(f'{self.resource_name} -- no parsed data to dump '\n f'(hint: call `parse` method first)')\n dump(path=dump_path, data=self.parsed_data, resource_name=self.resource_name)",
"def includeInDump(self):\n pass",
"def dumpMetaServerChunkMap(metaServer, dumpMetaFile, defaultMetaFile, defaultCheckPoint):\n\n # Get latest checkpoint file\n # Gzip latest file and copy it locally\n print \"Compressing latest checkpoint %s on %s\" % (defaultCheckPoint, metaServer.node)\n if not os.path.exists(\"./checkpointdir\"):\n command = \"mkdir ./checkpointdir\"\n os.system(command)\n command = \"ssh -o StrictHostKeyChecking=no %s gzip -c %s > ./checkpointdir/latest.gz\" % (metaServer.node, defaultCheckPoint)\n os.system(command)\n\n #print \"Copying latest checkpoint file %s.gz\" % defaultCheckPoint\n #command = \"scp -o StrictHostKeyChecking=no %s:%s.gz ./checkpointdir\" % (metaServer.node, defaultCheckPoint)\n #os.system(command)\n\n print \"Uncompressing latest checkpoint ./checkpointdir/latest.gz\" \n command = \"gunzip -f ./checkpointdir/latest.gz\"\n os.system(command)\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((metaServer.node, metaServer.port))\n req = \"DUMP_CHUNKTOSERVERMAP\\r\\nVersion: KFS/1.0\\r\\nCseq: 1\\r\\n\\r\\n\"\n sock.send(req)\n sockIn = sock.makefile('r')\n for line in sockIn:\n if line.find('OK') == 0:\n continue\n if line.find('Cseq') == 0:\n continue\n if line.find('Status') == 0:\n continue\n if line.find('\\r\\n') == 0:\n break\n sock.close()\n \n # Gzip the file and scp over to dumMetaFile.gz and extract it\n print \"Compressing chunk map dump %s on %s\" % (defaultMetaFile, metaServer.node)\n command = \"ssh -o StrictHostKeyChecking=no %s gzip -f %s\" % (metaServer.node, defaultMetaFile)\n os.system(command)\n print \"Copying chunk map dump %s.gz to %s.gz\" % (defaultMetaFile, dumpMetaFile)\n command = \"scp -o StrictHostKeyChecking=no %s:%s.gz %s.gz\" % (metaServer.node, defaultMetaFile, dumpMetaFile)\n os.system(command)\n print \"Uncompressing chunk map dump %s.gz\" % (dumpMetaFile)\n command = \"gunzip -f %s.gz\" % dumpMetaFile\n os.system(command)\n\n print \"Creating symlink chunkmap.txt to %s\" % (dumpMetaFile)\n command = \"rm chunkmap.txt\"\n os.system(command)\n command = \"ln -s %s chunkmap.txt\" % (dumpMetaFile)\n os.system(command)",
"def dump():\n global CACHE\n return CACHE",
"def memory(kdump_memory):\n config_db = ConfigDBConnector()\n if config_db is not None:\n config_db.connect()\n config_db.mod_entry(\"KDUMP\", \"config\", {\"memory\": kdump_memory})",
"def dumpMemory():\n libxml2mod.xmlDumpMemory()",
"def pump_and_dump(func, article_id, start, end, dump_dir):\n \n data = func(article_id, start, end)\n \n dump_dir = dump_dir\n ref = start[:16] + '--' + end[:16] + '--' + 'threadtemp'\n file = dump_dir +'/' + ref + '.pickle'\n\n with open(file, 'wb') as f:\n pickle.dump(data, f)",
"def dumbcache_load(cache_dir=r'data\\cache'):\n DUMBCACHE = os.path.join(r'..', cache_dir, r'br_store.dmp')\n with open(DUMBCACHE, 'rb') as f:\n return pkl.load(f)",
"def genDump(exePath,inputDict,outputDict):\n paramFile = path.join(path.dirname(inputDict),'damo.par')\n with open(paramFile,'w') as f:\n f.write('DUMP'+'\\n')\n f.write(inputDict+'\\n')\n f.write(outputDict)\n runDamocles(exePath, paramFile)\n remove(paramFile)",
"def dump(args):\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()",
"def daily(dbname, as_username='postgres'):\n\n filename = '{dbname}-{indate}.dump.sql'.format(\n dbname=dbname, indate=datetime.now().strftime('%Y-%m-%d'))\n backup_daily_dir = os.path.join(BACKUPS_STORE_DIR, 'daily')\n if not os.path.isdir(backup_daily_dir):\n os.makedirs(backup_daily_dir)\n\n dumpfile = execute_pgdump(dbname, as_username)\n dst = os.path.join(backup_daily_dir, filename)\n logger.info('moving {src} into {dst}'.format(src=dumpfile, dst=dst))\n shutil.move(dumpfile, dst)\n logger.info('{dst} has a size of {size} bytes.'.format(\n dst=dst, size=get_file_size(dst)))"
] | [
"0.57792044",
"0.53705996",
"0.52849925",
"0.52743363",
"0.5234966",
"0.518306",
"0.51442665",
"0.5070806",
"0.5030404",
"0.49894577",
"0.49146363",
"0.49089596",
"0.48556525",
"0.4828357",
"0.48120502",
"0.47813582",
"0.47782636",
"0.4737786",
"0.46923456",
"0.46380943",
"0.45736268",
"0.45575222",
"0.45132217",
"0.44417477",
"0.44284102",
"0.44216308",
"0.43894592",
"0.43817386",
"0.43554786",
"0.4351797"
] | 0.6442122 | 0 |
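The document above memoizes minidump_dump output per minidump path so repeated symbolization never reruns the tool. A stripped-down sketch of that caching pattern, with a hypothetical DumpRunner class standing in for the symbolizer:

import subprocess

class DumpRunner(object):
    def __init__(self, dumper_path):
        self._dumper_path = dumper_path
        self._cache = {}

    def get_output(self, minidump):
        # Return the cached output when available; otherwise run the tool once.
        if minidump in self._cache:
            return self._cache[minidump]
        proc = subprocess.Popen([self._dumper_path, minidump],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        combined = stdout.decode(errors='replace') + '\n' + stderr.decode(errors='replace')
        self._cache[minidump] = combined
        return combined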
Red = Disable, Blue = Enable. If a plugin is enabled on the dashboard but shows as disabled here, inform the Owner. | async def plugin(self,ctx):
special_case = {"Anime":"myanimelist","Anti Raid":"antiraid"}
plugin_setting = await self.redis.hgetall("{}:Config:Cogs".format(ctx.message.guild.id))
embed = discord.Embed()
cogs = self.bot.cogs.keys()
for x in cogs:
setting = u"\U0001F534" #red
if x in ("Core", "Remindme", "Tools", "REPL","Events"): # A Owner's thing only.
if ctx.message.author.id != self.bot.owner.id:
continue
setting = u"\U0001F535" #blue
if x.lower() in plugin_setting or special_case.get(x) in plugin_setting:
setting = u"\U0001F535" #blue
embed.add_field(name = x,value = setting)
if ctx.message.guild.me.colour.value:
embed.colour = ctx.message.guild.me.colour
embed.set_footer(text = "{} = Disable | {} = Enable".format(u"\U0001F534",u"\U0001F535"))
await ctx.send(embed=embed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_enabled(self):",
"def check_disabled(self):\n return None",
"def get_status(self):\n return super(Cabling, self).get_status()",
"def enable(self):",
"def Enabled(self) -> bool:",
"def disable(self):",
"def isEnabled(self):",
"def disable_feature(self,reason,source=\"gff3_maniger\"):\r\n date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\r\n self.add_history(date,source,reason)\r\n self.active = False\r\n if self._owner_line.type == 'SNP':\r\n self._owner_line._owner_set.all_snp_disabled()",
"async def cog_check(self, ctx):\n guild_doc = await db.PLUGINS.find_one({\"_id\": ctx.guild.id})\n\n if guild_doc.get(\"Verification\"):\n return True\n\n else:\n await ctx.send(\n embed=discord.Embed(\n description=(\n f\"{var.E_DISABLE} The Verification plugin\"\n \" is disabled in this server\"\n ),\n color=var.C_ORANGE\n )\n )",
"def enable(self) -> None:",
"def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})",
"def disability_specify(self, instance):\r\n return instance.user.profile.disability_specify",
"def disable():\n boutonPierre[\"state\"] = \"disabled\"\n boutonFeuille[\"state\"] = \"disabled\"\n boutonCiseaux[\"state\"] = \"disabled\"",
"def check_enable_mode(self, *args, **kwargs):\n pass",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def setEnabled(*args):",
"def get_everyone_denied(self):",
"def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"",
"def disable(self) -> None:",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n rif_info = {}\n rif_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"RIF\", rif_info)",
"def disabled(name):\n return not enabled(name)"
] | [
"0.64461994",
"0.61870736",
"0.6155342",
"0.606294",
"0.60158396",
"0.59407747",
"0.5909308",
"0.5869749",
"0.5851598",
"0.58354104",
"0.5772434",
"0.5748269",
"0.574007",
"0.5675615",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.5668143",
"0.56668574",
"0.56544757",
"0.5642185",
"0.56349945",
"0.55991596"
] | 0.65774465 | 0 |
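The plugin command above reports each cog with a red circle for disabled and a blue circle for enabled, special-casing a few cog names. A small sketch of just that mapping, with plugin_status_icon being a hypothetical helper rather than part of the bot:

RED_CIRCLE = u"\U0001F534"   # disabled
BLUE_CIRCLE = u"\U0001F535"  # enabled

def plugin_status_icon(cog_name, enabled_cogs, special_case=None):
    # Map a cog to its status emoji, honouring any special-case aliases.
    special_case = special_case or {}
    key = special_case.get(cog_name, cog_name.lower())
    return BLUE_CIRCLE if key in enabled_cogs else RED_CIRCLE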
store input into filename using pickle.dump | def store(input, filename):
    cout = open(filename, 'wb')
    pickle.dump(input, cout)
    cout.close()
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def store(self, filename):",
"def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))",
"def save_file_data(name, obj, input_path='/inputs'):\n filename = '{}/{}.pkl'.format(input_path, name)\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(filename.format(name), 'wb+') as output:\n pickle.dump(obj, output)",
"def save(self, output, data):",
"def save(data, filename):\r\n with open(filename, 'wb') as fp:\r\n pickle.dump(data, fp)",
"def save(self,filename): \n with open(filename, 'wb') as f:\n pickle.dump(self,f)",
"def save(self,filename):\n with open(filename,'wb') as f:\n pickle.dump(self,f)",
"def save(self, filename):\n pickle.dump(self, open(filename + '.p', 'wb'), 2)",
"def store_pickle(fname, info, mode='w'):\n assert fname[-2:] == '.p'\n f = open(fname, mode)\n pickle.dump(info, f)\n f.close()",
"def psave(var, filename):\n pickle.dump(var, open(filename, 'wb'))",
"def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)",
"def save_as(self, filename):\n assert type(filename) == str, 'ERROR: filename should be type str'\n if '.pkl' in filename:\n with open(filename, 'wb') as f:\n dill.dump(self, f)\n else:\n with open(filename + '.pkl', 'wb') as f:\n dill.dump(self, f)",
"def save(fname, data):\r\n with open(fname, 'wb') as f:\r\n pickle.dump(data, f)",
"def save(self, filename):\n if '.pkl' not in filename:\n filename = filename + '.pkl'\n with open(filename, 'wb') as f:\n pickle.dump(self, f)",
"def save_input(self):\n if not os.path.exists(self.wdir):\n os.makedirs(self.wdir)\n\n with open(self.filepath, \"w\") as f:\n f.write(self.input_string)\n print(f\"-- Input file [{self.filename}] written successfully.\")",
"def save_var(filename, data, protocol = -1, allow_dill=False):\n if filename.endswith('.gz') :\n open_method = gzip.open\n else:\n open_method = open\n\n output = open_method(filename, 'wb')\n try:\n # Pickle dictionary using given protocol\n std_pickle.dump(data, output, protocol)\n finally:\n output.close()\n\n return",
"def save(self, filename):\n with open(filename, \"wb\") as f:\n pkl.dump(self, f)",
"def pickle_data(file_name, data):\n outfile = open(file_name, \"wb\")\n pickle.dump(data, outfile)\n outfile.close()",
"def save(self, filename, **kwargs):\n with open(filename, 'wb') as fin:\n pickle.dump(self, fin, **kwargs)",
"def save(self,filename):\n f = open(filename, 'wb')\n pickle.dump(self,f)\n f.close()",
"def save_pickle(file, path):\n with open(path, 'wb') as f:\n pickle.dump(file, f)\n file_name = re.findall(r\"/?[^/]+\", path)[-1].strip(\"/\")\n print(f\"Stored {file_name}.\")",
"def write_pickle_object_to_file(self, inpobj):\n with gzip.open('%s.tmp' % self.pickle_file, 'wb') as pkl_file:\n pickle.dump(inpobj, pkl_file, pickle.HIGHEST_PROTOCOL)\n run_command('mv %s.tmp %s' % (self.pickle_file, self.pickle_file))\n return True",
"def save_pickle(filename, data, override=True):\n filename = \"{}.pkl\".format(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n if override == False:\n filename = add_unique_postfix(filename)\n\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n return filename",
"def save(self,fn):\n fn = fn if fn[-4:] == \".pkl\" else fn+\".pkl\"\n with open(fn,\"wb+\") as f:\n pickle.dump(self,f)\n log(\"Saved reader to {}\".format(fn))",
"def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()",
"def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)",
"def save(self, filename):\n cPickle.dump(self, open(filename, \"wb\"))",
"def pickle_data(filename, data):\n f = open(filename, \"wb\")\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n f.close()",
"def save(self, file_name):\n saved_data = { \"start_config\" : self.start_config, \"action_storage\" : self.action_storage } \n with open(file_name, 'wb') as fh:\n pickle.dump(saved_data, fh)",
"def saveVar(var,name):\n with open(name+'.pickle','wb') as fl:\n pickle.dump(var,fl)"
] | [
"0.7272676",
"0.7110179",
"0.69229895",
"0.67838925",
"0.6766273",
"0.67407393",
"0.67352134",
"0.6724542",
"0.6718559",
"0.6689364",
"0.6684626",
"0.6666956",
"0.6617081",
"0.6609867",
"0.65701944",
"0.65609634",
"0.6513856",
"0.6507621",
"0.6492011",
"0.64900124",
"0.64834404",
"0.64727414",
"0.6456504",
"0.6442476",
"0.64262193",
"0.64228386",
"0.64211506",
"0.64107746",
"0.64028895",
"0.6402235"
] | 0.89019084 | 0 |
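The store helper above only writes; a matching reader, sketched here on the assumption that the same binary pickle format is wanted back, would be:

import pickle

def load(filename):
    # Counterpart to store(): read a pickled object back from disk.
    with open(filename, 'rb') as cin:
        return pickle.load(cin)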
Sends args and kwargs to any configured callbacks. This handles the cases where the 'callbacks' variable is ``None``, a single function, or a list. | def _multiple_callbacks(callbacks, *args, **kwargs):
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_callbacks(self, **kwargs):\n for callback in self.CALLBACKS:\n getattr(self, callback)(**kwargs)",
"def callbacks(*args, addCallback: Script=None, clearAllCallbacks: bool=True, clearCallbacks:\n bool=True, describeHooks: bool=True, dumpCallbacks: bool=True, executeCallbacks:\n bool=True, hook: AnyStr=\"\", listCallbacks: bool=True, owner: AnyStr=\"\",\n removeCallback: Script=None, **kwargs)->List[AnyStr]:\n pass",
"def __call__(self, args, kwargs):\n callback = self._callback_ref()\n if callback is not None:\n callback(*args, **kwargs)",
"def subscribe_callbacks(self, *args):\n for arg in args:\n self.callbacks.append(arg)",
"def trigger(self, callback_type, *args):\n if self.callbacks.has_key(callback_type):\n for cb in self.callbacks[callback_type]:\n cb(*args)",
"def callback(self, *args, **kwargs):\n\n for key, (methodwrapper, onetime) in list(self.callbacks.items()):\n try:\n methodwrapper(*args, **kwargs)\n except:\n log.err()\n\n if onetime:\n del self.callbacks[key]",
"def callback(self, fun: Callable[[], None] | None) -> None:",
"def _forward_cb(self, *args, **kwargs):\n for callback_function in self.changeCallbacks:\n callback_function(*args, **kwargs)",
"def _run_callbacks(cls, cb_method, *args):\n global CALLBACKS\n for c in CALLBACKS:\n attr = getattr(c, cb_method)\n attr(*args)",
"def execute_callbacks(self, name, *args, **kwargs):\n callbacks = self.callbacks.get(name, {}).items()\n for order, func in callbacks:\n func(self, *args, **kwargs)\n\n return len(callbacks)",
"def handle_admincallbacks(bot, ievent):\n cbs = getcallbacktable()\n if not ievent.rest: ievent.reply(\"callbacks: \", cbs)\n else:\n try: ievent.reply(\"%s callbacks: \" % ievent.rest, cbs[ievent.rest])\n except KeyError: ievent.reply(\"no such callbacks available\")",
"def get_callbacks(callbacks: List[str]) -> List:\n callback_functions = []\n for callback in callbacks:\n try:\n callback_functions.append(eval(callback))\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted callbacks from {tf}, {sm}, {tfa}')\n return callback_functions",
"def set_callbacks(self, **dic_functions):\n for action in self.interface.CALLBACKS:\n try:\n f = dic_functions[action]\n except KeyError:\n pass\n else:\n setattr(self.interface.callbacks, action, f)\n manquantes = [\n a for a in self.interface.CALLBACKS if not a in dic_functions]\n if not manquantes:\n logging.debug(\n f\"{self.__class__.__name__} : Tous les callbacks demandés sont fournis.\")\n else:\n logging.warning(\n f\"{self.__class__.__name__} didn't set asked callbacks {manquantes}\")",
"def dispatch_callback(self, callback):\n self.callback_queue.put(lambda: callback.func(*callback.args))",
"def _trigger_callback(self, *args, **kwargs):\n for callback_function in self.changeCallbacks:\n callback_function(self, *args, **kwargs)",
"def execute(self):\n args = self.args\n \n for callNumber in xrange(len(self.callback)):\n args = (self.callback.pop(0))(*args)\n \n if not (isinstance(args,tuple) or isinstance(args,list)):\n args = [args]\n \n return args",
"def initialise_callbacks():\n adapter = mice.ice.createObjectAdapterWithEndpoints(\"Callback.Client\", \"tcp -h 127.0.0.1\")\n adapter.activate()\n cb=mice.Murmur.ServerCallbackPrx.uncheckedCast(adapter.addWithUUID(ServerCallbackI(s, adapter)))\n s.addCallback(cb)",
"def handle_cb(self, obj, cb, kwargs):\n callbacks = kwargs.get(\"callbacks\", {})\n callback_exception_failure = kwargs.get(\"callback_exception_failure\", True)\n\n callback = callbacks.get(cb, None)\n if callback:\n m = \"Running callback function {f} for {n}\".format\n m = m(f=callback, n=cb)\n self.mylog.debug(m)\n try:\n obj = callback(handler=self, obj=obj, kwargs=kwargs)\n except Exception as e:\n if callback_exception_failure:\n raise\n else:\n m = \"Exception occurred in callback function {f} for {n}: {e}\".format\n m = m(f=callback, n=cb, e=e)\n self.mylog.exception(m)\n else:\n m = \"No callback function specified for {n}\".format\n m = m(n=cb)\n self.mylog.debug(m)\n return obj",
"def perform_callback(self, *args, **kwargs):\n pass",
"def execute(self):\n\t\tfor callback in self:\n\t\t\tcallback()",
"def setEventCallbacks(self, callbacks):\n self.ws.setEventCallbacks(callbacks)",
"def fit_callbacks():\n # pylint: disable=no-value-for-parameter\n return model_callbacks() + logging_callbacks()\n # pylint: enable=no-value-for-parameter",
"def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)",
"def runCallback(self, callback=\"help\"):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n config = self._request.getConfig()\n data = self._request.getData()\n\n # invoke all callbacks for the 'callback'\n handled = tools.run_callback(callback,\n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n # do end callback\n tools.run_callback(\"end\", {'request': request})",
"def execute(self):\n results = []\n \n for callback in self.callback:\n results.append(callback(*self.args))\n \n return results",
"def do_callback(self, sensor):\n if sensor in self._callbacks:\n for callback in self._callbacks[sensor]:\n try:\n callback(None)\n except Exception as e:\n self.stick.logger.error(\n \"Error while executing all callback : %s\",\n e,\n )",
"def collecting_callback():\n calls = []\n\n def cb(**kwargs):\n calls.append(kwargs)\n\n return cb, calls",
"def set_callbacks(self):\n\t\tself.set_floor_callbacks()\n\t\tself.set_button_callbacks()\n\t\tself.set_stop_callback()",
"def on_post_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __post_exec_callbacks)\n for callback in __post_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on post-execution callback using %s\", callback)",
"def on_pre_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __pre_exec_callbacks)\n for callback in __pre_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on pre-execution callback using %s\", callback)"
] | [
"0.702277",
"0.66514504",
"0.65428776",
"0.651266",
"0.6477689",
"0.61787194",
"0.6089447",
"0.60846204",
"0.60464215",
"0.60348064",
"0.59095436",
"0.590122",
"0.5900241",
"0.5882658",
"0.5858614",
"0.5747529",
"0.57051486",
"0.5680165",
"0.56781036",
"0.5653725",
"0.5636546",
"0.5616152",
"0.55880874",
"0.55756146",
"0.55564874",
"0.5518041",
"0.5517807",
"0.5497091",
"0.54964554",
"0.5467474"
] | 0.7574884 | 0 |
Adds and connects attributes from the default encore FKIK switch anim setup to rig nodes in the scene. Imports the default control setup from file, or you may specify source_ctrl in args to override. | def make_fkikSwitch_connection_attrs(partpre=None, side='Lt', source_ctrl=None, tag_name='switch', snapTo=None,
add_attrs=None):
switch_anim = ''
if source_ctrl is not None:
switch_anim = source_ctrl
    if not partpre:
        partpre = 'mypart_'
    newnodes = []
    if source_ctrl is None:
# filepath = r'C:/Users/Nicob/Documents/maya/scripts/rigBot/rigBot/config/switcher_anim.mb'
system_base_path = os.path.dirname(utils.__file__)
base_path = os.path.join(system_base_path, 'config')
file_path = os.path.join(base_path, 'switcher_anim.mb')
        newnodes = mc.file(file_path, i=1, ignoreVersion=1, rnn=1, mergeNamespacesOnClash=0, rpr=partpre, ra=1,
options="v=0;", pr=1)
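        # rpr=partpre prefixes every imported node, which is why the switcher control is looked up as partpre + '_CTL' below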
switch_anim = partpre + '_CTL'
# pos switcher grpOffset node if snapTo
if snapTo is not None:
utils.snap_to_transform(snapTo, switch_anim.replace('CTL', 'grpOffset'))
mc.setAttr(switch_anim.replace('CTL', 'grpOffset') + '.r', 0, 0, 0)
# get value of tags and sort into ik and fk vis groups
iks = []
fks = []
nodes = mc.ls('*.' + tag_name)
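    # ls on '*.<tag_name>' returns attribute plugs like 'node.switch'; split('.') below recovers the node name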
for node in nodes:
if partpre in node and side in node:
mode = mc.getAttr(node)
if mode:
mode = mode.lower()
if 'ik' in mode:
iks.append(node.split('.')[0])
if 'fk' in mode:
fks.append(node.split('.')[0])
for ik in iks:
# ikparpar=utils.get_parent(ik)
ikpar = utils.get_parent(ik)
if ikpar is None:
            mc.connectAttr(switch_anim + '.FK_IK', ik + '.visibility', f=1)
else:
mc.connectAttr(switch_anim + '.FK_IK', ikpar + '.visibility', f=1)
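    # FK groups get the inverse of FK_IK through a reverse node, so only one set of controls is visible at a time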
rvn = mc.createNode('reverse', name=switch_anim + '_fkik_vis_rv')
mc.connectAttr(switch_anim + '.FK_IK', rvn + '.inputX')
for fk in fks:
fkpar = utils.get_parent(fk)
if fkpar:
mc.connectAttr(rvn + '.outputX', fkpar + '.visibility', f=1)
if add_attrs is not None:
for att in add_attrs:
mc.addAttr(switch_anim, ln=att, min=0, max=1, dv=0, k=1)
nns = []
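    # mc.file returned full DAG paths; rename each imported node to its short name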
for nn in reversed(newnodes):
nnn = ''
sn = nn.split("|")
nnn = mc.rename(nn, sn[-1])
nns.append(nnn)
anim = mc.ls(partpre + '_CTL')
# if mc.objExists (partpre+'_skeleton_grp'):
# mc.parent (anim, partpre+'_skeleton_grp' )
return anim | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)",
"def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()",
"def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')",
"def setup(args):\n cfg = get_cfg()\n add_imaterialist_config(cfg)\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\n cfg.merge_from_file(args.config_file)\n \n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n # Setup logger for \"imaterialist\" module\n setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name=\"imaterialist\")\n return cfg",
"def setup_threeCtrl(lf_lidrails, rt_lidrails):\n # Declare control variables\n lf_up = ['lf_lid01_tp01_ccc', 'lf_lid01_tp02_ccc', 'lf_lid01_tp03_ccc']\n lf_dn = ['lf_lid01_dn01_ccc', 'lf_lid01_dn02_ccc', 'lf_lid01_dn03_ccc']\n rt_up = ['rt_lid01_tp01_ccc', 'rt_lid01_tp02_ccc', 'rt_lid01_tp03_ccc']\n rt_dn = ['rt_lid01_dn01_ccc', 'rt_lid01_dn02_ccc', 'rt_lid01_dn03_ccc']\n\n # Connect lidRails ramps to lid profile controls\n\n # ========\n # lf_up\n\n # inner\n cmds.connectAttr(lf_up[0] + '.tx', lf_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[0] + '.ty', lf_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n # mid\n lf_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um01_addDoubleLinear')\n cmds.connectAttr(lf_up[1] + '.tx', lf_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um01_adn + '.input2', 0.5)\n cmds.connectAttr(lf_lid01_um01_adn + '.output', lf_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[1] + '.ty', lf_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # outer\n lf_lid01_uo01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_uo01_addDoubleLinear')\n cmds.connectAttr(lf_up[2] + '.tx', lf_lid01_uo01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_uo01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_uo01_adn + '.output', lf_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[2] + '.ty', lf_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n\n # ========\n # lf_dn\n\n # Reverse node\n lf_dn_rvn = cmds.createNode('reverse', n='lf_lid01_dn01_reverse')\n # inner\n cmds.connectAttr(lf_dn[0] + '.tx', lf_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[0] + '.ty', lf_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputX', lf_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid\n lf_lid01_dm01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm01_addDoubleLinear')\n cmds.connectAttr(lf_dn[1] + '.tx', lf_lid01_dm01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm01_adn + '.input2', 0.5)\n cmds.connectAttr(lf_lid01_dm01_adn + '.output', lf_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[1] + '.ty', lf_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputY', lf_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # outer\n lf_lid01_do01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_do01_addDoubleLinear')\n cmds.connectAttr(lf_dn[2] + '.tx', lf_lid01_do01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_do01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_do01_adn + '.output', lf_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[2] + '.ty', lf_dn_rvn + '.inputZ', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputZ', lf_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n\n # ========\n # rt_up\n\n # inner\n rt_lid01_ui01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_ui01_plusMinusAverage')\n cmds.setAttr(rt_lid01_ui01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_ui01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_up[0] + '.tx', rt_lid01_ui01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_ui01_asn + '.output1D', rt_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[0] + '.ty', rt_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n # mid\n rt_lid01_um01_mdn = cmds.createNode('multDoubleLinear', 
n='rt_lid01_um01_multDoubleLinear')\n rt_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um01_addDoubleLinear')\n cmds.connectAttr(rt_up[1] + '.tx', rt_lid01_um01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um01_mdn + '.output', rt_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_adn + '.input2', 0.5)\n cmds.connectAttr(rt_lid01_um01_adn + '.output', rt_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[1] + '.ty', rt_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # outer\n rt_lid01_uo_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_uo_multDoubleLinear')\n cmds.connectAttr(rt_up[2] + '.tx', rt_lid01_uo_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_uo_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_uo_mdn + '.output', rt_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[2] + '.ty', rt_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n\n # ========\n # rt_dn\n\n # Reverse node\n rt_dn_rvn = cmds.createNode('reverse', n='rt_lid01_dn01_reverse')\n # inner\n rt_lid01_di01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_di01_plusMinusAverage')\n cmds.setAttr(rt_lid01_di01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_di01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_dn[0] + '.tx', rt_lid01_di01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_di01_asn + '.output1D', rt_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[0] + '.ty', rt_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputX', rt_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid\n rt_lid01_dm01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm01_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm01_asn + '.input1D[0]', 0.5)\n cmds.connectAttr(rt_dn[1] + '.tx', rt_lid01_dm01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm01_asn + '.output1D', rt_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[1] + '.ty', rt_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputY', rt_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # outer\n rt_lid01_do01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_do01_multDoubleLinear')\n cmds.connectAttr(rt_dn[2] + '.tx', rt_lid01_do01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_do01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_do01_mdn + '.output', rt_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[2] + '.ty', rt_dn_rvn + '.inputZ', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputZ', rt_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)",
"def __init__(self, yaml_file = 'options_modeling.yaml'):\n\n self.reproj_th = 2.5\n self.min_matched_views = 3\n self.descriptors = {'SIFT': 'sift'} # Descriptor name and module name\n self.mask_suffix = '*_mask.png'\n \n # If there is an options file, it will overwrite the defaults \n if yaml_file is not None:\n self.load(yaml_file)",
"def connectControl(*args, fileName: bool=True, index: int=0, preventContextualMenu: bool=True,\n preventOverride: bool=True, **kwargs)->None:\n pass",
"def setup_fourCtrl(lf_lidrails, rt_lidrails):\n # Declare control variables\n lf_up = ['L_upperLid1_ctrl', 'L_upperLid2_ctrl', 'L_upperLid3_ctrl', 'L_upperLid4_ctrl']\n lf_dn = ['L_lowerLid1_ctrl', 'L_lowerLid2_ctrl', 'L_lowerLid3_ctrl', 'L_lowerLid4_ctrl']\n rt_up = ['R_upperLid1_ctrl', 'R_upperLid2_ctrl', 'R_upperLid3_ctrl', 'R_upperLid4_ctrl']\n rt_dn = ['R_lowerLid1_ctrl', 'R_lowerLid2_ctrl', 'R_lowerLid3_ctrl', 'R_lowerLid4_ctrl']\n\n # Connect lidRails ramps to lid profile controls\n\n # lf_up =========\n\n # inner\n cmds.connectAttr(lf_up[0] + '.tx', lf_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[0] + '.ty', lf_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n # mid - inner\n lf_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um01_addDoubleLinear')\n cmds.connectAttr(lf_up[1] + '.tx', lf_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um01_adn + '.input2', 0.333)\n cmds.connectAttr(lf_lid01_um01_adn + '.output', lf_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[1] + '.ty', lf_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # mid - outer\n lf_lid01_um02_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um02_addDoubleLinear')\n cmds.connectAttr(lf_up[2] + '.tx', lf_lid01_um02_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um02_adn + '.input2', 0.666)\n cmds.connectAttr(lf_lid01_um02_adn + '.output', lf_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[2] + '.ty', lf_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n # outer\n lf_lid01_uo01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_uo01_addDoubleLinear')\n cmds.connectAttr(lf_up[3] + '.tx', lf_lid01_uo01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_uo01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_uo01_adn + '.output', lf_lidrails + '.offsettop[3].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[3] + '.ty', lf_lidrails + '.offsettop[3].offsettop_FloatValue', f=True)\n\n # lf_dn =========\n\n lf_dn_rvn = cmds.createNode('reverse', n='lf_lid01_dn01_reverse')\n lf_dn02_rvn = cmds.createNode('reverse', n='lf_lid01_dn02_reverse')\n # inner\n cmds.connectAttr(lf_dn[0] + '.tx', lf_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[0] + '.ty', lf_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputX', lf_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid - inner\n lf_lid01_dm01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm01_addDoubleLinear')\n cmds.connectAttr(lf_dn[1] + '.tx', lf_lid01_dm01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm01_adn + '.input2', 0.333)\n cmds.connectAttr(lf_lid01_dm01_adn + '.output', lf_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[1] + '.ty', lf_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputY', lf_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # mid - outer\n lf_lid01_dm02_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm02_addDoubleLinear')\n cmds.connectAttr(lf_dn[2] + '.tx', lf_lid01_dm02_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm02_adn + '.input2', 0.666)\n cmds.connectAttr(lf_lid01_dm02_adn + '.output', lf_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[2] + '.ty', lf_dn02_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn02_rvn + '.outputX', lf_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n # 
outer\n lf_lid01_do01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_do01_addDoubleLinear')\n cmds.connectAttr(lf_dn[3] + '.tx', lf_lid01_do01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_do01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_do01_adn + '.output', lf_lidrails + '.offsetbottom[3].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[3] + '.ty', lf_dn02_rvn + '.inputY')\n cmds.connectAttr(lf_dn02_rvn + '.outputY', lf_lidrails + '.offsetbottom[3].offsetbottom_FloatValue', f=True)\n\n # rt_up =========\n\n # inner\n rt_lid01_ui01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_ui01_plusMinusAverage')\n cmds.setAttr(rt_lid01_ui01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_up[0] + '.tx', rt_lid01_ui01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_ui01_asn + '.output1D', rt_lidrails + '.offsettop[3].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[0] + '.ty', rt_lidrails + '.offsettop[3].offsettop_FloatValue', f=True)\n # mid -inner\n rt_lid01_um01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um01_multDoubleLinear')\n rt_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um01_addDoubleLinear')\n cmds.connectAttr(rt_up[2] + '.tx', rt_lid01_um01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um01_mdn + '.output', rt_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_adn + '.input2', 0.333)\n cmds.connectAttr(rt_lid01_um01_adn + '.output', rt_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[2] + '.ty', rt_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n\n # mid - outer\n rt_lid01_um02_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um02_multDoubleLinear')\n rt_lid01_um02_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um02_addDoubleLinear')\n cmds.connectAttr(rt_up[1] + '.tx', rt_lid01_um02_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um02_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um02_mdn + '.output', rt_lid01_um02_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um02_adn + '.input2', 0.666)\n cmds.connectAttr(rt_lid01_um02_adn + '.output', rt_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[1] + '.ty', rt_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n\n # outer\n rt_lid01_uo_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_uo_multDoubleLinear')\n cmds.connectAttr(rt_up[3] + '.tx', rt_lid01_uo_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_uo_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_uo_mdn + '.output', rt_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[3] + '.ty', rt_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n\n # rt_dn =========\n\n rt_dn_rvn = cmds.createNode('reverse', n='rt_lid01_dn01_reverse')\n rt_dn02_rvn = cmds.createNode('reverse', n='rt_lid01_dn02_reverse')\n # inner\n rt_lid01_di01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_di01_plusMinusAverage')\n cmds.setAttr(rt_lid01_di01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_di01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_dn[0] + '.tx', rt_lid01_di01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_di01_asn + '.output1D', rt_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[0] + '.ty', rt_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputX', rt_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid - inner\n rt_lid01_dm01_asn = 
cmds.createNode('plusMinusAverage', n='rt_lid01_dm01_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm01_asn + '.input1D[0]', 0.333)\n cmds.connectAttr(rt_dn[2] + '.tx', rt_lid01_dm01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm01_asn + '.output1D', rt_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[2] + '.ty', rt_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputY', rt_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # mid - outer\n rt_lid01_dm02_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm02_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm02_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm02_asn + '.input1D[0]', 0.666)\n cmds.connectAttr(rt_dn[1] + '.tx', rt_lid01_dm02_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm02_asn + '.output1D', rt_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[1] + '.ty', rt_dn02_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn02_rvn + '.outputX', rt_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n # outer\n rt_lid01_do01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_do01_multDoubleLinear')\n cmds.connectAttr(rt_dn[3] + '.tx', rt_lid01_do01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_do01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_do01_mdn + '.output', rt_lidrails + '.offsetbottom[3].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[3] + '.ty', rt_dn02_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn02_rvn + '.outputY', rt_lidrails + '.offsetbottom[3].offsetbottom_FloatValue')",
"def init():\n \n # General parameters\n vect_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/' # graphs directory\n csv_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/nodes_for_tracking.csv' # csv file \n dest_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/' # output directory\n verbose = True\n main_params = [vect_path, csv_path, dest_path, verbose]\n \n # Linking parameters\n createCSV = True \n forced_matching = True\n search_range = 10\n memory = 3\n adaptive_stop = 5 \n link_params = [createCSV, forced_matching, search_range, memory, \n adaptive_stop]\n \n # Tracking check parameters\n check = True # True to create a check image\n img_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking2/MosaicTest_t070.jpg' # image file on which to draw\n size = 1 # size of the nodes drawing\n check_params = [check, img_path, size]\n \n return main_params, link_params, check_params",
"def create_ik_setup(controls, joints):\n\n # Create control offset transforms\n exp_tf_ms = []\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par:\n cmds.parent(buf, par[0])\n exp_tf_ms.append(buf)\n\n root_control, pole_control, goal_control = controls\n handle, effector = cmds.ikHandle(sj=joints[0], ee=joints[-1], sol='ikRPsolver')\n cmds.setAttr('{}.hiddenInOutliner'.format(handle), True)\n cmds.orientConstraint(goal_control, joints[-1], mo=True)\n cmds.parent(handle, goal_control)\n cmds.hide(handle)\n\n # Connect root control to ik joint offset group\n ik_joints_offset = cmds.listRelatives(joints[0], p=True)[0]\n cmds.parentConstraint(root_control, ik_joints_offset, mo=True)\n cmds.scaleConstraint(root_control, ik_joints_offset, mo=True)\n\n # Connect twisting and pole vector control\n cmds.addAttr(goal_control, ln='twist', at='float', k=True)\n cmds.connectAttr('{}.twist'.format(goal_control), '{}.twist'.format(handle))\n cmds.poleVectorConstraint(pole_control, handle)\n\n # Add PV visibility attribute\n cmds.addAttr(goal_control, shortName='pv', longName='poleVector', at='bool', k=True)\n cmds.connectAttr('{}.pv'.format(goal_control), '{}.v'.format(pole_control))\n cmds.setAttr('{}.pv'.format(goal_control),1)\n\n # Add curve that points elbow to pole control\n crv = cmds.curve(p=[[0, 0, 0], [0, 1, 0]], d=1)\n cmds.connectAttr('{}.visibility'.format(pole_control), '{}.visibility'.format(crv))\n lock_hide_attrs(crv, attrs=['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])\n cmds.setAttr('{}.overrideEnabled'.format(crv), True)\n cmds.setAttr('{}.overrideDisplayType'.format(crv), 2)\n decomp_joint = cmds.createNode('decomposeMatrix')\n decomp_control = cmds.createNode('decomposeMatrix')\n cmds.connectAttr('{}.worldMatrix'.format(joints[1]), '{}.inputMatrix'.format(decomp_joint))\n cmds.connectAttr('{}.worldMatrix'.format(pole_control), '{}.inputMatrix'.format(decomp_control))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_joint), '{}.controlPoints[0]'.format(crv))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_control), '{}.controlPoints[1]'.format(crv))\n\n return handle, crv, exp_tf_ms",
"def setup(instname):\n global reducer, inst_name,van_mass,bleed_switch,rate,pixels\n # debugging (allows to reload changed DirectEnergyConversion package from Mantid)\n\n if instname=='MAR' or instname=='mar':\n print 'setup mari'\n inst_name='MAR'\n reducer = DRC.setup_reducer('MARI')\n bleed_switch=False\n rate=0.0\n pixels=0\n elif instname=='MER' or instname=='mer':\n print 'setup merlin'\n inst_name='MER'\n reducer = DRC.setup_reducer('MERLIN')\n bleed_switch=True\n rate=0.01\n pixels=80\n elif instname=='MAP' or instname=='map':\n print 'setup maps'\n inst_name='MAP'\n reducer = DRC.setup_reducer('MAPS')\n bleed_switch=False\n rate=0.0\n pixels=0.0\n elif instname=='LET' or instname=='let':\n print 'setup let'\n inst_name='LET'\n reducer = DRC.setup_reducer('LET')\n bleed_switch=True\n rate=0.01\n pixels=80\n elif instname=='ARCS' or instname=='arcs':\n print 'setup Arcs'\n inst_name='ARC'\n reducer = DRC.setup_reducer('ARCS')\n bleed_switch=False\n rate=0.01\n pixels=80\n elif instname=='SEQ' or instname=='seq':\n print 'setup Sequoia'\n inst_name='SEQ'\n reducer = DRC.setup_reducer('SEQUOIA')\n bleed_switch=False\n rate=0.01\n pixels=80\n elif instname=='CNCS' or instname=='cncs':\n print 'setup cncs'\n inst_name='SEQ'\n reducer = DRC.setup_reducer('CNCS')\n bleed_switch=False\n rate=0.01\n pixels=80\n elif instname=='HYSPEC' or instname=='hyspec':\n print 'setup hyspec'\n inst_name='SEQ'\n reducer = DRC.setup_reducer('HYSPEC')\n bleed_switch=False\n rate=0.01\n pixels=80\n else:\n print 'Instrument name not defined'\n return \n van_mass=reducer.get_default_parameter('vanadium-mass')",
"def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]",
"def __init__(self, cat_path, img_path, orig_json_path, rst_json_path,\n layers, **kwargs):\n\n # Initialize class attributes\n self.name = \"AI2D-RST\" # Dataset name\n self.cat_path = Path(cat_path) # Path to categories JSON\n self.img_path = Path(img_path) # Path to AI2D images\n self.orig_json_path = Path(orig_json_path) # Path to AI2D JSON\n self.rst_json_path = Path(rst_json_path) # Path to AI2D-RST JSON\n\n # Check input types\n assert self.cat_path.is_file()\n assert self.img_path.is_dir()\n assert self.orig_json_path.is_dir()\n assert self.rst_json_path.is_dir()\n assert layers in ['grouping', 'grouping+connectivity', 'connectivity',\n 'discourse', 'discourse+connectivity']\n\n # Load node and edge dictionaries\n self.node_dict = node_dicts\n self.edge_dict = edge_dicts\n\n # Load diagram labels from the labels JSON file\n categories = self._load_annotation(cat_path)\n\n # Initialize label encoder and encode integer labels\n le = LabelEncoder().fit(list(categories.values()))\n\n # Create a dictionary mapping encoded class integers to their names\n self.class_names = {k: v for k, v in zip(le.transform(le.classes_),\n le.classes_)}\n\n # Create a dictionary mapping filenames to labels\n label_dict = {k: le.transform([v]) for k, v in categories.items()}\n\n # Convert labels into a numpy array for calculating class weights\n label_arr = np.concatenate(list(label_dict.values()))\n\n # Calculate class weights\n class_weights = compute_class_weight(class_weight='balanced',\n classes=np.unique(label_arr),\n y=label_arr)\n\n # Wrap class weights into a torch Tensor and make available through\n # attribute\n self.class_weights = torch.FloatTensor(class_weights)\n\n # Get diagram identifiers and labels\n self.file_ids = list(label_dict.keys())\n self.labels = list(label_dict.values())\n\n # Return DGL graph objects by default\n self._return_nx = False\n\n # Check if NetworkX graphs have been requested\n if kwargs and 'nx' in kwargs:\n\n # Set the flag for returning NetworkX graphs to True\n if kwargs['nx']:\n \n self._return_nx = True\n\n # Check if node type information should be added to node features\n if kwargs and kwargs['node_types']:\n\n # Set add node types flag to True\n self._add_node_types = True\n\n # Check which node label dictionary to use: this depends on the kind\n # of annotation layers requested\n if 'discourse' in layers:\n\n # Get the node labels from the node dictionary & cast to array\n node_labels = list(self.node_dict['discourse'].values())\n node_labels = np.asarray(node_labels)\n\n else:\n\n # Get the node labels from the node dictionary & cast to array\n node_labels = list(self.node_dict['grouping'].values())\n node_labels = np.asarray(node_labels)\n\n # Initialize label binarizer and fit to node labels\n self._node_binarizer = LabelBinarizer().fit(node_labels)\n\n else:\n\n self._add_node_types = False\n\n # Check if smoothed labels have been requested\n if 'smooth' in kwargs and kwargs['smooth']:\n\n # Set the flag for smoothed labels to True\n self._smooth_labels = True\n\n else:\n\n self._smooth_labels = False\n\n # Initialize label binarizer for RST relations if needed\n if 'discourse' in layers:\n\n # Get the RST relations from the node dictionary\n rst_relations = np.asarray(list(self.node_dict['relations'].values()))\n\n # Initialize label binarizer and fit to node labels\n self._rst_binarizer = LabelBinarizer().fit(rst_relations)\n\n # Load the requested annotation and create the graphs accordingly\n self._load(layers)\n\n # Get number of unique diagram 
classes in the dataset\n self.n_classes = len(np.unique(self.labels))\n\n # Get the number of node and edge classes for DGL graphs (grouping +\n # connectivity)\n if 'discourse' not in layers and not self._return_nx:\n\n # Get unique node and edge types for graphs that don't use typed\n # nodes or edges\n node_list = [x.ndata['kind'].flatten() for x in self.diagrams]\n self.n_node_classes = len(np.unique(torch.cat(node_list).numpy()))\n\n edge_list = [x.edata['kind'].flatten() for x in self.diagrams]\n self.n_edge_classes = len(np.unique(torch.cat(edge_list).numpy()))\n\n # Do the same for DGLHeteroGraphs (discourse)\n if 'discourse' in layers and not self._return_nx:\n\n node_list = np.concatenate(np.asarray([x.ntypes for x in\n self.diagrams]))\n self.n_node_classes = len(np.unique(node_list))\n\n edge_list = np.concatenate(np.asarray([x.etypes for x in\n self.diagrams]))\n self.n_edge_classes = len(np.unique(edge_list))",
"def setup(args):\n # chaparral,denseForest,lake,canyon,burning,burnt = neighbours\n config_path = args[0]\n config = utils.load(config_path)\n # -- THE CA MUST BE RELOADED IN THE GUI IF ANY OF THE BELOW ARE CHANGED --\n config.title = \"Forest Fire\"\n config.dimensions = 2\n config.states = \\\n (\n CHAPARRAL,\n DENSE_FORREST,\n LAKE,\n CANYON,\n BURNING,\n BURNT,\n START_BURN,\n END_BURN\n )\n\n # ------------ -------------------------------------------------------------\n\n config.state_colors = \\\n [\n (0.6,0.6,0), #chaparral\n (0,0.4,0), #dense forrest\n (0,0.5,1), #lake\n (0.5,0.5,0.5), #canyon\n (1,0,0), #burning\n (0.25,0.25,0.25), #burnt\n (1,0.7,0), #starting to burn\n (0.8,0,0.2) #ending burn\n ]\n\n config.grid_dims = (grid_size, grid_size)\n config.num_generations = 1000\n config.set_initial_grid(initial_grid)\n config.wrap = False\n\n # --------------------------------------------------------------------\n\n # the GUI calls this to pass the user defined config\n # into the main system with an extra argument\n # do not change\n if len(args) == 2:\n config.save()\n sys.exit()\n return config",
"def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg",
"def __init__(self,config = None):\n \n self.join_path = join_path\n self.label_path = cfg['labels_path']\n self.pick_path = (cfg['result_path'] + cfg['pickle_path'])\n self.label_dir = os.path.join(CWD_PATH,self.join_path, self.label_path)\n\n #Variables inherent to the Fluent data: \n self.num_ins = 4\n\n self.scale_var = cfg['scale_var']\n # User set values are below. These can be adjusted in config.yml \n self.MSE_thresh1 = (cfg['thresh1']*self.scale_var)**2\n self.MSE_thresh2 = (cfg['thresh2']*self.scale_var)**2\n self.MSE_thresh3 = (cfg['thresh3']*self.scale_var)**2\n \n self.rew_goal = cfg['reward'] * self.scale_var\n\n self.noise = cfg['noise']\n self.minmaxbuffer = cfg['minmaxbuffer']\n\n # Get the function of input-output mapping, and max & min:\n [self.O_CH4_flow_uniformity, mins,maxes] = self.get_funcs('O_CH4_flow_uniformity')\n [self.O_CH4_mol_frac, mins,maxes] = self.get_funcs('O_CH4_mol_frac')\n [self.O_t, mins, maxes] = self.get_funcs('O_t')\n \n self.mins = mins# * self.scale_var\n self.maxes = maxes#* self.scale_var\n #Action range is a percentage of the total range\n self.action_range = cfg['action_range']*self.scale_var\n\n #Action space is the up & down range for the 4 actions \n self.action_space = Box(-self.action_range, self.action_range, shape=(self.num_ins,), dtype=np.float32)\n\n # For ref, this is a 10d state space:\n #in: 1 ch4 flow, 2 ch4 t, 3 o2 flow, 4 o2 t,\n #out: 5 flow unif, 6 mol frac, 7 temp\n #out - target: 8 flow unif, 9 mol frac, 10 temp\n \n self.observation_space = Tuple((Box(self.mins.values[0],self.maxes.values[0],shape=(1,), dtype=np.float32),\n Box(self.mins.values[1],self.maxes.values[1],shape=(1,), dtype=np.float32),\n Box(self.mins.values[2],self.maxes.values[2],shape=(1,), dtype=np.float32),\n Box(self.mins.values[3],self.maxes.values[3],shape=(1,), dtype=np.float32),\n Box(self.mins.values[4],self.maxes.values[4],shape=(1,), dtype=np.float32),\n Box(self.mins.values[5],self.maxes.values[5],shape=(1,), dtype=np.float32),\n Box(self.mins.values[6],self.maxes.values[6],shape=(1,), dtype=np.float32),\n Box(self.mins.values[4],self.maxes.values[4],shape=(1,), dtype=np.float32),\n Box(self.mins.values[5],self.maxes.values[5],shape=(1,), dtype=np.float32),\n Box(self.mins.values[6],self.maxes.values[6],shape=(1,), dtype=np.float32)))\n \n # TODO this isn't really a proper gym spec\n self._spec = lambda: None\n self._spec.id = \"AllVar-v0\"\n \n # For rendering:\n self.viewer = None\n self.labels = cfg['labels']\n \n #initialize variables for tracking:\n self.episode = 0\n self.reward = 0\n self.reset()",
"def __init__(self, cfg_index, conditions, pars_dir, step_title, use_defaults, input_cfg_json_data):\n super().__init__(cfg_index, conditions, pars_dir, step_title, use_defaults, input_cfg_json_data)\n self.set_name = \"alignment\"\n if input_cfg_json_data:\n self._read_custom_pars()\n else:\n self._combine_conditions()",
"def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)",
"def setup_flags(self):\n self.io_args.color = self.io_args.color_full\n self.io_args.rig_in = self.io_args.rig\n self.io_args.matches = os.path.join(self.io_args.output_root, \"matches.json\")\n self.io_args.rig_out = os.path.join(self.io_args.output_root, \"rig.json\")",
"def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for idx in range(self.num_agents):\n self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n self.var = self.min_var if self.var < self.min_var else self.var\n old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n # Note: if you need load old model, there should be a procedure for juding if the trainers[idx] is None\n for i in range(self.num_agents):\n self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur",
"def __init__(self, modifications = {}):\n CoveredState.__init__(self, BASE_CONFIG, coverLayers = TOTAL_LAYERS)\n for name in modifications:\n self.Edit(name, modifications[name])\n if unix:\n self.Cover(\"BuilderDir\", VELAUNCHER_DIR, layer = UNAVAILABLE_LAYER)",
"def _augment_pipeline_cfg(self):",
"def set_defaults(args):\n # Check critical files exist\n args.train_file = os.path.join(args.data_dir, args.train_file)\n if not os.path.isfile(args.train_file):\n raise IOError('No such file: %s' % args.train_file)\n args.dev_file = os.path.join(args.data_dir, args.dev_file)\n if not os.path.isfile(args.dev_file):\n raise IOError('No such file: %s' % args.dev_file)\n if args.embedding_file:\n args.embedding_file = os.path.join(args.embed_dir, args.embedding_file)\n if not os.path.isfile(args.embedding_file):\n raise IOError('No such file: %s' % args.embedding_file)\n\n # Set model directory\n subprocess.call(['mkdir', '-p', args.model_dir])\n\n # Set model name\n if not args.model_name:\n import uuid\n import time\n args.model_name = time.strftime(\"%Y%m%d-\") + str(uuid.uuid4())[:8]\n\n # Set log + model file names\n args.log_file = os.path.join(args.model_dir, args.model_name + '.txt')\n args.model_file = os.path.join(args.model_dir, args.model_name + '.pt')\n\n # Embeddings options\n if args.embedding_file:\n with open(args.embedding_file) as f:\n dim = len(f.readline().strip().split(' ')) - 1\n args.embedding_dim = dim\n elif not args.embedding_dim:\n raise RuntimeError('Either embedding_file or embedding_dim '\n 'needs to be specified.')\n\n # Make sure fix_embeddings and embedding_file are consistent\n if args.fix_embeddings:\n if not (args.embedding_file or args.pretrained):\n logger.warning('WARN: fix_embeddings set to False '\n 'as embeddings are random.')\n args.fix_embeddings = False\n return args",
"def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args",
"def addControl(*args):",
"def addControl(*args):",
"def addControl(*args):",
"def addControl(*args):",
"def add_source_achors():\n pass",
"def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None"
] | [
"0.5719306",
"0.5415949",
"0.5388942",
"0.5385029",
"0.5336632",
"0.5299819",
"0.5200528",
"0.5145229",
"0.5140653",
"0.51194525",
"0.5079638",
"0.50723904",
"0.50585204",
"0.50376135",
"0.5010488",
"0.49763772",
"0.49707508",
"0.49594924",
"0.4954051",
"0.4949411",
"0.4947352",
"0.4943901",
"0.4934593",
"0.49284354",
"0.4923892",
"0.4923892",
"0.4923892",
"0.4923892",
"0.49217173",
"0.4919505"
] | 0.5789509 | 0 |
Create an IK attribute on the given ctrl, connect IK handles to ik switch. Also connect fk ctrls and ik ctrls visibility to switch. This will create an 'IK' attr on the switch ctrl | def create_fk_ik_switch(switch_ctrl, ik_handles, fk_ctrls, ik_ctrls, vis_ctrl=None, switch_attr_name='IK', vis_attr_name='fkIkCtrlVis'):
fk_ctrls = mc.ls(fk_ctrls)
ik_ctrls = mc.ls(ik_ctrls)
ik_handles = mc.ls(ik_handles)
if not vis_ctrl:
vis_ctrl = switch_ctrl
# Create attributes
if not mc.objExists(switch_ctrl+'.'+switch_attr_name):
mc.addAttr(switch_ctrl, ln=switch_attr_name, min=0, max=1, k=1)
if not mc.objExists(vis_ctrl+'.'+vis_attr_name):
mc.addAttr(vis_ctrl, ln=vis_attr_name, at='enum', en='auto:fkOnly:ikOnly:both', k=1)
# Connect ik handles
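    # driving ikBlend with the switch attr blends each handle between FK (0) and IK (1)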
for handle in ik_handles:
mc.connectAttr(switch_ctrl+'.'+switch_attr_name, handle+'.ikBlend')
    # Create visibility switch for the ik ctrls
ik_choice = utils.create_node('choice', n=vis_attr_name+'_ik_choice')
mc.connectAttr(vis_ctrl+'.'+vis_attr_name, ik_choice+'.selector')
mc.connectAttr(switch_ctrl+'.'+switch_attr_name, ik_choice+'.input[0]')
mc.setAttr(ik_choice+'.input[1]', 0)
mc.setAttr(ik_choice+'.input[2]', 1)
mc.setAttr(ik_choice+'.input[3]', 1)
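    # enum mapping for IK ctrl visibility: auto follows the IK attr, fkOnly hides them, ikOnly and both show them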
for ctrl in ik_ctrls:
mc.setAttr(ctrl+'.v', l=0)
mc.connectAttr(ik_choice+'.output', ctrl+'.v', f=1)
mc.setAttr(ctrl+'.v', l=1)
    # Create visibility switch for the fk ctrls
fk_choice = utils.create_node('choice', n=vis_attr_name+'_fk_choice')
    fk_rv = utils.create_node('reverse', n=vis_attr_name+'_fk_rv')
mc.connectAttr(switch_ctrl+'.'+switch_attr_name, fk_rv+'.inputX')
mc.connectAttr(vis_ctrl+'.'+vis_attr_name, fk_choice+'.selector')
mc.connectAttr(fk_rv+'.outputX', fk_choice+'.input[0]')
mc.setAttr(fk_choice+'.input[1]', 1)
mc.setAttr(fk_choice+'.input[2]', 0)
mc.setAttr(fk_choice+'.input[3]', 1)
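    # enum mapping for FK ctrl visibility: auto shows them only when the IK attr is off (via the reverse node), ikOnly hides them, fkOnly and both show them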
for ctrl in fk_ctrls:
mc.setAttr(ctrl+'.v', l=0)
mc.connectAttr(fk_choice+'.output', ctrl+'.v', f=1)
mc.setAttr(ctrl+'.v', l=1)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_soft_ik(ik_ctrl, ik_joints, ik_handle):\n\n # get name and constant variables\n name = ik_handle+'Soft'\n parent = utils.get_parent(ik_joints[0])\n ik_handle_parent = utils.get_parent(ik_handle)\n\n # get total length of joint chain\n chain_length = 0\n for jnt in ik_joints[1:]:\n chain_length += abs(mc.getAttr(jnt+'.tx'))\n\n mc.addAttr(ik_joints[0], ln='softIkChainLength', k=1, dv=chain_length)\n\n #create dist node, (distance between top ik_joint and ik_handle) = X\n soft_ik_root = utils.snap_locator(ik_joints[0], node_type='transform')\n soft_ik_root = mc.rename(soft_ik_root, name+'_root_'+utils.get_suffix('transform'))\n\n dist = utils.create_distance_reader(soft_ik_root, ik_handle_parent)\n\n #create the dSoft and softIK attributes on the controller\n mc.addAttr(ik_ctrl, ln='softIK', min=0, k=1)\n ctrl_clamp = mc.createNode('clamp')\n mc.connectAttr(ik_ctrl+'.softIK', ctrl_clamp+'.inputR')\n mc.setAttr(ctrl_clamp+'.minR', 0.0001)\n mc.setAttr(ctrl_clamp+'.maxR', 10000000)\n\n #create node network for soft IK\n da_pma = mc.createNode('plusMinusAverage', n=name+'_da_pma')\n x_minus_da_pma = mc.createNode('plusMinusAverage', n=name+'_x_minus_da_pma')\n negate_x_minus_md = mc.createNode('multiplyDivide', n=name+'_negate_x_minus_md')\n divBy_dSoft_md = mc.createNode('multiplyDivide', n=name+'_divBy_dSoft_md')\n pow_e_md = mc.createNode('multiplyDivide', n=name+'_pow_e_md')\n one_minus_pow_e_pma = mc.createNode('plusMinusAverage', n=name+'_one_minus_pow_e_pma')\n times_dSoft_md = mc.createNode('multiplyDivide', n=name+'_times_dSoft_md')\n plus_da_pma = mc.createNode('plusMinusAverage', n=name+'_plus_da_pma')\n da_cond = mc.createNode('condition', n=name+'_da_cond')\n dist_diff_pma = mc.createNode('plusMinusAverage', n=name+'_dist_diff_pma')\n defaultPos_pma = mc.createNode('plusMinusAverage', n=name+'_defaultPos_pma')\n\n #set operations\n mc.setAttr(da_pma+'.operation', 2)\n mc.setAttr(x_minus_da_pma+'.operation', 2)\n mc.setAttr(negate_x_minus_md+'.operation', 1)\n mc.setAttr(divBy_dSoft_md+'.operation', 2)\n mc.setAttr(pow_e_md+'.operation', 3)\n mc.setAttr(one_minus_pow_e_pma+'.operation', 2)\n mc.setAttr(times_dSoft_md+'.operation', 1)\n mc.setAttr(plus_da_pma+'.operation', 1)\n mc.setAttr(da_cond+'.operation', 5)\n mc.setAttr(dist_diff_pma+'.operation', 2)\n mc.setAttr(defaultPos_pma+'.operation', 2)\n\n #make connections\n mc.connectAttr(ik_joints[0]+'.softIkChainLength', da_pma+'.input1D[0]')\n mc.connectAttr(ctrl_clamp+'.outputR', da_pma+'.input1D[1]')\n\n mc.connectAttr(dist+'.localDistance', x_minus_da_pma+'.input1D[0]')\n mc.connectAttr(da_pma+'.output1D', x_minus_da_pma+'.input1D[1]')\n\n mc.connectAttr(x_minus_da_pma+'.output1D', negate_x_minus_md+'.input1X')\n mc.setAttr(negate_x_minus_md+'.input2X', -1)\n\n mc.connectAttr(negate_x_minus_md+'.outputX', divBy_dSoft_md+'.input1X')\n mc.connectAttr(ctrl_clamp+'.outputR', divBy_dSoft_md+'.input2X')\n\n mc.setAttr(pow_e_md+'.input1X', 2.718281828)\n mc.connectAttr(divBy_dSoft_md+'.outputX', pow_e_md+'.input2X')\n\n mc.setAttr(one_minus_pow_e_pma+'.input1D[0]', 1)\n mc.connectAttr(pow_e_md+'.outputX' , one_minus_pow_e_pma+'.input1D[1]')\n\n mc.connectAttr(one_minus_pow_e_pma+'.output1D', times_dSoft_md+'.input1X')\n mc.connectAttr(ctrl_clamp+'.outputR', times_dSoft_md+'.input2X')\n\n mc.connectAttr(times_dSoft_md+'.outputX', plus_da_pma+'.input1D[0]')\n mc.connectAttr(da_pma+'.output1D', plus_da_pma+'.input1D[1]')\n\n mc.connectAttr(da_pma+'.output1D', da_cond+'.firstTerm')\n mc.connectAttr(dist+'.localDistance', 
da_cond+'.secondTerm')\n mc.connectAttr(dist+'.localDistance', da_cond+'.colorIfFalseR')\n mc.connectAttr(plus_da_pma+'.output1D', da_cond+'.colorIfTrueR')\n\n mc.connectAttr(da_cond+'.outColorR', dist_diff_pma+'.input1D[0]')\n mc.connectAttr(dist+'.localDistance', dist_diff_pma+'.input1D[1]')\n\n mc.setAttr(defaultPos_pma+'.input1D[0]', 0)\n mc.connectAttr(dist_diff_pma+'.output1D', defaultPos_pma+'.input1D[1]')\n\n # Create new ik aim node\n up = [1,0,0]\n aim = [0,1,0]\n\n grp = mc.createNode('transform', n=name+'_soft_aim_'+utils.get_suffix('transform'), p=ik_handle_parent)\n gAim = mc.createNode('transform', n=name+'_soft_'+utils.get_suffix('transform'), p=grp)\n\n mc.aimConstraint(soft_ik_root,\n grp,\n aim=aim,\n u=up,\n wu=up,\n wut='objectRotation',\n wuo=ik_ctrl,\n n=grp+'_ac')\n\n mc.connectAttr(defaultPos_pma+'.output1D', gAim+'.ty')\n mc.pointConstraint(gAim, ik_handle)\n mc.parent(ik_handle, gAim)\n\n # parent stuff\n if parent:\n mc.parent(soft_ik_root, parent)\n\n return gAim",
"def switch_to_ik(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n try:\n # Turn FK control visibility off\n pm.setAttr(fk_ctrls_path + '.v', 0)\n\n # Turn IK control visibility on\n pm.setAttr(target_ctrl_path + '.v', 1)\n pm.setAttr(format_path(__TARGET_CTRL_PATH + '|{1}target_CTRLShape',\n robot) + '.visibility', 1)\n\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.v'.format(robot), 1)\n except:\n # These aren't crucial to the switch as they're just visual, and \n # a connection or locking of any of these attributes might throw\n # an error, so let's just skip it\n pass\n \n try:\n # Snap IK Ctrl to FK location\n _snap_ik_target_to_fk(robot)\n except:\n raise MimicError('Error swithching to IK; could not snap IK CTRL to FK')\n\n ## Find closest IK configuration to current FK pose ##\n # Get FK config and all IK solutions\n ik_sols = find_ik_solutions(robot)\n fk_config = find_fk_config(robot)\n\n # Remove all MFG-specific offsets from the FK config\n solver_params = get_solver_params(robot)\n axis_offsets = solver_params.axis_offsets\n rot_directions = solver_params.rot_directions\n fk_config_norm = _normalize_fk_pose(fk_config, axis_offsets, rot_directions)\n\n ## TO-DO: account for FK config rotations above and below 180 degrees\n # Select the closes IK configuration to the given FK config\n ik_config = find_closest_config(fk_config_norm, ik_sols)\n\n # Match IK config to FK pose\n pm.setAttr(target_ctrl_path + '.ikSolution1', ik_config[0])\n pm.setAttr(target_ctrl_path + '.ikSolution2', ik_config[1])\n pm.setAttr(target_ctrl_path + '.ikSolution3', ik_config[2])\n\n # turn ik solve back on\n pm.setAttr(target_ctrl_path + '.ik', 1)",
"def make_fkikSwitch_connection_attrs(partpre=None, side='Lt', source_ctrl=None, tag_name='switch', snapTo=None,\n add_attrs=None):\n\n switch_anim = ''\n if source_ctrl is not None:\n switch_anim = source_ctrl\n\n partpre = partpre\n if partpre == '':\n partpre = 'mypart_'\n\n if source_ctrl is None:\n # filepath = r'C:/Users/Nicob/Documents/maya/scripts/rigBot/rigBot/config/switcher_anim.mb'\n system_base_path = os.path.dirname(utils.__file__)\n base_path = os.path.join(system_base_path, 'config')\n file_path = os.path.join(base_path, 'switcher_anim.mb')\n newnodes = mc.file(filepath, i=1, ignoreVersion=1, rnn=1, mergeNamespacesOnClash=0, rpr=partpre, ra=1,\n options=\"v=0;\", pr=1)\n\n switch_anim = partpre + '_CTL'\n\n # pos switcher grpOffset node if snapTo\n\n if snapTo is not None:\n utils.snap_to_transform(snapTo, switch_anim.replace('CTL', 'grpOffset'))\n mc.setAttr(switch_anim.replace('CTL', 'grpOffset') + '.r', 0, 0, 0)\n\n # get value of tags and sort into ik and fk vis groups\n\n iks = []\n fks = []\n nodes = mc.ls('*.' + tag_name)\n\n for node in nodes:\n if partpre in node and side in node:\n mode = mc.getAttr(node)\n if mode:\n mode = mode.lower()\n if 'ik' in mode:\n iks.append(node.split('.')[0])\n if 'fk' in mode:\n fks.append(node.split('.')[0])\n for ik in iks:\n # ikparpar=utils.get_parent(ik)\n ikpar = utils.get_parent(ik)\n if ikpar is None:\n mc.connectAttr(switch_anim + '.FK_IK', ik + '.visiblity', f=1)\n else:\n mc.connectAttr(switch_anim + '.FK_IK', ikpar + '.visibility', f=1)\n rvn = mc.createNode('reverse', name=switch_anim + '_fkik_vis_rv')\n mc.connectAttr(switch_anim + '.FK_IK', rvn + '.inputX')\n for fk in fks:\n fkpar = utils.get_parent(fk)\n if fkpar:\n mc.connectAttr(rvn + '.outputX', fkpar + '.visibility', f=1)\n if add_attrs is not None:\n for att in add_attrs:\n mc.addAttr(switch_anim, ln=att, min=0, max=1, dv=0, k=1)\n\n nns = []\n\n for nn in reversed(newnodes):\n nnn = ''\n sn = nn.split(\"|\")\n nnn = mc.rename(nn, sn[-1])\n nns.append(nnn)\n\n anim = mc.ls(partpre + '_CTL')\n\n # if mc.objExists (partpre+'_skeleton_grp'):\n # mc.parent (anim, partpre+'_skeleton_grp' )\n return anim",
"def create_ik_setup(controls, joints):\n\n # Create control offset transforms\n exp_tf_ms = []\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par:\n cmds.parent(buf, par[0])\n exp_tf_ms.append(buf)\n\n root_control, pole_control, goal_control = controls\n handle, effector = cmds.ikHandle(sj=joints[0], ee=joints[-1], sol='ikRPsolver')\n cmds.setAttr('{}.hiddenInOutliner'.format(handle), True)\n cmds.orientConstraint(goal_control, joints[-1], mo=True)\n cmds.parent(handle, goal_control)\n cmds.hide(handle)\n\n # Connect root control to ik joint offset group\n ik_joints_offset = cmds.listRelatives(joints[0], p=True)[0]\n cmds.parentConstraint(root_control, ik_joints_offset, mo=True)\n cmds.scaleConstraint(root_control, ik_joints_offset, mo=True)\n\n # Connect twisting and pole vector control\n cmds.addAttr(goal_control, ln='twist', at='float', k=True)\n cmds.connectAttr('{}.twist'.format(goal_control), '{}.twist'.format(handle))\n cmds.poleVectorConstraint(pole_control, handle)\n\n # Add PV visibility attribute\n cmds.addAttr(goal_control, shortName='pv', longName='poleVector', at='bool', k=True)\n cmds.connectAttr('{}.pv'.format(goal_control), '{}.v'.format(pole_control))\n cmds.setAttr('{}.pv'.format(goal_control),1)\n\n # Add curve that points elbow to pole control\n crv = cmds.curve(p=[[0, 0, 0], [0, 1, 0]], d=1)\n cmds.connectAttr('{}.visibility'.format(pole_control), '{}.visibility'.format(crv))\n lock_hide_attrs(crv, attrs=['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])\n cmds.setAttr('{}.overrideEnabled'.format(crv), True)\n cmds.setAttr('{}.overrideDisplayType'.format(crv), 2)\n decomp_joint = cmds.createNode('decomposeMatrix')\n decomp_control = cmds.createNode('decomposeMatrix')\n cmds.connectAttr('{}.worldMatrix'.format(joints[1]), '{}.inputMatrix'.format(decomp_joint))\n cmds.connectAttr('{}.worldMatrix'.format(pole_control), '{}.inputMatrix'.format(decomp_control))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_joint), '{}.controlPoints[0]'.format(crv))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_control), '{}.controlPoints[1]'.format(crv))\n\n return handle, crv, exp_tf_ms",
"def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)",
"def setCtrls(self, fks, ik, upv):\n # type: (list[str], str, str) -> None\n\n self.fkCtrls = [self._getNode(x) for x in fks]\n self.fkTargets = [self._getMth(x) for x in fks]\n\n self.ikCtrl = self._getNode(ik)\n self.ikTarget = self._getMth(ik)\n\n self.upvCtrl = self._getNode(upv)\n self.upvTarget = self._getMth(upv)\n\n self.ikRotCtrl = self._getNode(ik.replace(\"_ik_\", \"_rot_\"))\n self.ikRotTarget = self.ikTarget",
"def key_ik(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n if not pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_ik(robot)\n\n ik_attributes = ['ik',\n 'v',\n 'ikSolution1',\n 'ikSolution2',\n 'ikSolution3']\n\n # Key all IK elements\n for attr in ik_attributes:\n pm.setKeyframe(target_ctrl_path, attribute=attr)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n fk_pose = find_fk_config(robot)\n\n # Key all FK elements\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY',\n value=fk_pose[0])\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[1])\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[2])\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ',\n value=fk_pose[3])\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[4])\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ',\n value=fk_pose[5])\n\n # Key visibility of FK controllers\n pm.setKeyframe(fk_ctrls_path, attribute='visibility')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')",
"def switch_to_fk(robot):\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n # Turn IK control visibility off\n pm.setAttr(get_target_ctrl_path(robot) + '.v', 0)\n\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.v'.format(robot), 0)\n\n # Turn FK control visibility on\n pm.setAttr(fk_ctrls_path + '.v'.format(robot), 1)\n\n # Find axis angles from IK pose, and match FK control handles\n fk_config = find_fk_config(robot)\n fk_config = _reconcile_fk_pose(robot, fk_config)\n\n pm.setAttr(format_path(__A1_FK_CTRL_PATH, robot) + '.rotateY',\n fk_config[0])\n pm.setAttr(format_path(__A2_FK_CTRL_PATH, robot) + '.rotateX',\n fk_config[1])\n pm.setAttr(format_path(__A3_FK_CTRL_PATH, robot) + '.rotateX',\n fk_config[2])\n pm.setAttr(format_path(__A4_FK_CTRL_PATH, robot) + '.rotateZ',\n fk_config[3])\n pm.setAttr(format_path(__A5_FK_CTRL_PATH, robot) + '.rotateX',\n fk_config[4])\n pm.setAttr(format_path(__A6_FK_CTRL_PATH, robot) + '.rotateZ',\n fk_config[5])\n\n pm.setAttr(target_ctrl_path + '.ik', 0)",
"def toggle_ik_fk(*args):\n\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'ikTab':\n ik_tab = 1\n else:\n ik_tab = 0\n\n robots = get_robot_roots(1)\n if not robots:\n return\n\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n \n if ik_tab:\n if pm.getAttr(target_ctrl_path + '.ik'):\n continue\n\n switch_to_ik(robot)\n\n else:\n if not pm.getAttr(target_ctrl_path + '.ik'):\n continue\n\n switch_to_fk(robot)\n \n # Maintain appropriate selections on each robot\n try:\n selection = []\n active_robots = get_robot_roots()\n if active_robots:\n if ik_tab:\n for robot in active_robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.objExists(tool_ctrl_path):\n selection.append(tool_ctrl_path)\n else:\n selection.append(target_ctrl_path)\n else:\n for robot in active_robots:\n selection.append(format_path(__A6_FK_CTRL_PATH, robot))\n \n pm.select(selection)\n else:\n pass\n\n except:\n pm.warning('Error selecting after IK/FK switch')",
"def SetControlSignals(inst_spec, itype, ctrl):\n\n itype <<= inst_spec.itype\n\n #\n # The Literal() function (see instructions.py) generates an Atlas 'literal'\n # value that can be used on the right-hand side of an assignment (as is done\n # below).\n #\n\n ctrl.ex <<= inst_spec.ex_ctrl.Literal()\n ctrl.mem <<= inst_spec.mem_ctrl.Literal()\n ctrl.wb <<= inst_spec.wb_ctrl.Literal()",
"def ikfkMechanics(module, extraName, jnts, mechSkelGrp, ctrlGrp, moduleType, rig):\n jntSuffix = suffix['joint']\n newJntChains = []\n ## create duplicate chains\n for chain in ['IK', 'FK']:\n newJnts = utils.duplicateJntChain(chain, jnts, parent=mechSkelGrp.name)\n newJntChains.append(newJnts)\n ikJnts = newJntChains[0]\n fkJnts = newJntChains[1]\n for i, each in enumerate(jnts):\n newName = '{}_result{}'.format(each.rsplit('_', 1)[0], jntSuffix)\n jnts[i] = cmds.rename(each, newName)\n # utils.addJntToSkinJnt(jnts[i], rig=rig)\n ## settings control\n module.settingCtrl = ctrlFn.ctrl(name='{}{}Settings'.format(extraName, moduleType),\n guide='{}{}Settings{}'.format(module.moduleName,\n moduleType, suffix['locator']),\n deleteGuide=True, side=module.side, skipNum=True,\n parent=module.rig.settingCtrlsGrp.name,\n scaleOffset=rig.scaleOffset, rig=rig)\n if moduleType == 'arm':\n settingJnt = jnts[3]\n else:\n settingJnt = jnts[2]\n module.settingCtrl.makeSettingCtrl(ikfk=True, parent=settingJnt)\n ## parent constraints\n for jnt, ikJnt, fkJnt in zip(jnts, ikJnts, fkJnts):\n parConstr = cmds.parentConstraint(ikJnt, fkJnt, jnt)\n cmds.connectAttr(module.settingCtrl.ctrl.ikfkSwitch, '{}.{}W1'.format(parConstr[0], fkJnt))\n swRev = utils.newNode('reverse', name='{}{}IKFKSw'.format(extraName, moduleType),\n side=module.side)\n swRev.connect('inputX', module.settingCtrl.ctrl.ikfkSwitch, mode='to')\n swRev.connect('outputX', '{}.{}W0'.format(parConstr[0], ikJnt), mode='from')\n ## control vis groups\n ikCtrlGrp = utils.newNode('group', name='{}{}IKCtrls'.format(extraName, moduleType),\n side=module.side, parent=ctrlGrp.name, skipNum=True)\n fkCtrlGrp = utils.newNode('group', name='{}{}FKCtrls'.format(extraName, moduleType),\n side=module.side, parent=ctrlGrp.name, skipNum=True)\n cmds.setDrivenKeyframe(ikCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0.999, v=1)\n cmds.setDrivenKeyframe(ikCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=1, v=0)\n cmds.setDrivenKeyframe(fkCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0.001, v=1)\n cmds.setDrivenKeyframe(fkCtrlGrp.name, at='visibility',\n cd=module.settingCtrl.ctrl.ikfkSwitch, dv=0, v=0)\n return ikJnts, fkJnts, jnts, ikCtrlGrp, fkCtrlGrp",
"def add_attr(nc_handle, var_name, key, value):\n doi_attr_name = 'DOI'\n nc.variables[varname].setncattr(key, value)",
"def ik_to_fk(node):\n ik_main_off = get_parent(node.ik_main_conn)\n fk_01_off = get_parent(node.fk_01_conn)\n fk_02_off = get_parent(node.fk_02_conn)\n fk_03_off = get_parent(node.fk_03_conn)\n\n ik_main_world_trans = get_world_trans(node.ik_main_conn)\n fk_01_world_trans = get_world_trans(node.fk_01_conn)\n ik_main_off_world_trans = get_world_trans(ik_main_off)\n fk_01_off_world_trans = get_world_trans(fk_01_off)\n fk_02_off_world_trans = get_world_trans(fk_02_off)\n fk_03_off_world_trans = get_world_trans(fk_03_off)\n\n # calculate base information\n def_len = (ik_main_off_world_trans - fk_01_off_world_trans).length()\n\n # Calculate ik direction\n ik_dir_01 = ik_main_off_world_trans - fk_01_off_world_trans\n ik_dir_02 = ik_main_world_trans - fk_01_world_trans\n\n ik_dir_rot = ik_dir_01.rotateTo(ik_dir_02).asEulerRotation()\n\n # Apply ik direction -> important to calculate correct pole rotations\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(ik_dir_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ik pole rotations\n ik_pole_world_mat = get_world_matrix(node.ik_pole_conn, 0)\n fk_03_world_inv_mat = get_world_inv_matrix(node.fk_01_conn, 0)\n\n ik_pole_rot_mat = ik_pole_world_mat * fk_03_world_inv_mat\n\n ik_pole_vec = oMa.MTransformationMatrix(ik_pole_rot_mat).translation(oMa.MSpace.kWorld)\n ik_pole_vec.y = 0\n\n ik_pole_rot = oMa.MVector.kZaxisVector.rotateTo(ik_pole_vec).asEulerRotation()\n\n # Calculate ik rotations\n tri_a_len = (fk_02_off_world_trans - fk_01_off_world_trans).length()\n tri_b_len = (fk_03_off_world_trans - fk_02_off_world_trans).length()\n tri_c_len = (ik_main_world_trans - fk_01_world_trans).length()\n\n if tri_c_len >= def_len:\n fk_02_angle = 0\n fk_01_angle = 0\n else:\n fk_02_angle = math.pi - solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"C\")\n fk_01_angle = -solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"B\")\n\n # Add rotations together\n fk_01_temp = oMa.MEulerRotation(fk_01_angle, ik_pole_rot.y, 0)\n\n ik_dir_mat = compose_mat(ik_dir_rot)\n fk_01_mat = compose_mat(fk_01_temp)\n rot_mat = fk_01_mat * ik_dir_mat\n\n # Apply everything\n fk_01_rot = get_rot_from_mat(rot_mat)\n fk_02_rot = (fk_02_angle, 0, 0)\n\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_01_rot[i], oMa.MAngle.kRadians))\n\n fk_02_rot_plugs = get_rot_plugs(node.fk_02_conn)\n for i, plug in enumerate(fk_02_rot_plugs):\n if not plug.isLocked:\n plug.setMAngle(oMa.MAngle(fk_02_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ankle rotation\n fk_03_rot = rot_world_space_to_local_space(node.ik_main_conn, get_parent(node.fk_03_conn))\n\n fk_03_rot_plugs = get_rot_plugs(node.fk_03_conn)\n for i, plug in enumerate(fk_03_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_03_rot[i], oMa.MAngle.kRadians))",
"def spline_ik(self):\n ikHandle, ikEffector, ikCurve = pm.ikHandle(\n name=self.name + \"_ikh\",\n startJoint=self.joints[0],\n endEffector=self.joints[-1],\n solver='ikSplineSolver',\n simplifyCurve=False\n )\n\n # Get the number of digits so we can set the zfill correctly,\n digits = len(str(len(ikCurve.cv)))\n\n # Iterate over each cv and create a cluster deformer,\n for i, cv in enumerate(ikCurve.cv):\n cluster_node, cluster_handle = pm.cluster(cv)\n cluster_handle.rename(\n ikCurve.nodeName() + '_ch_{}'.format(str(i).zfill(digits))\n )",
"def delete_ik_fk_keys(*args):\n if not check_robot_selection():\n pm.warning('No robots selected; ' \\\n 'Select at least one robot.')\n\n keyed_attrs = {__TARGET_CTRL_PATH: ['ik',\n 'visibility',\n 'ikSolution1',\n 'ikSolution2',\n 'ikSolution3'],\n __FK_CTRLS_PATH: ['visibility'],\n __A1_FK_CTRL_PATH: ['rotateY'],\n __A2_FK_CTRL_PATH: ['rotateX'],\n __A3_FK_CTRL_PATH: ['rotateX'],\n __A4_FK_CTRL_PATH: ['rotateZ'],\n __A5_FK_CTRL_PATH: ['rotateX'],\n __A6_FK_CTRL_PATH: ['rotateZ']}\n\n robots = get_robot_roots()\n\n current_frame = pm.currentTime()\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n # Check if there's a keyframe set on the target_CTRL.ik attribute\n key = pm.keyframe(target_ctrl_path,\n attribute='ik',\n query=True,\n time=current_frame)\n\n # If there is no keyframe set on the IK attribute, continue to the\n # next robot\n if not key:\n pm.warning('{} has no IK|FK keyframe at frame {}' \\\n .format(robot, current_frame))\n continue\n\n # If there is a keyframe on the IK attribute, we also check if there's\n # a keyframe on an FK controller as well, as we only consider there to\n # be a proper IK or FK keyframe if both are true\n # Note, we only need to check a single FK controller as they should all\n # be keyframed (or not) together\n fk_test_handle_path = format_path(__A1_FK_CTRL_PATH + '.rotateY', robot)\n fk_key = pm.keyframe(fk_test_handle_path,\n query=True,\n time=current_frame)\n # If there is no keyframe set on the FK controller attribute,\n # continue to the next robot\n if not fk_key:\n pm.warning('{} has no IK|FK keyframe at frame {}' \\\n .format(robot, current_frame))\n continue \n\n for obj in keyed_attrs:\n for attr in keyed_attrs[obj]:\n pm.cutKey(format_path(obj, robot),\n time=current_frame,\n attribute=attr,\n option=\"keys\")\n\n if pm.objExists(tool_ctrl_path):\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='visibility',\n option=\"keys\")\n\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='translate',\n option=\"keys\")\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='rotate',\n option=\"keys\")\n else:\n pm.cutKey(target_ctrl_path,\n time=current_frame,\n attribute='translate',\n option=\"keys\")\n pm.cutKey(target_ctrl_path,\n time=current_frame,\n attribute='rotate',\n option=\"keys\")",
"def key_ik_fk(*args):\n if not pm.window(\"mimic_win\", exists=True):\n return\n\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTabIndex=True)\n\n try:\n if current_tab == 1:\n key_ik()\n elif current_tab == 2:\n key_fk()\n except:\n pm.warning('Error keying IK/FK')",
"def get_Amn_one_k(self, ik):\n raise NotImplementedError(\n \"The get_Amn_one_k method is should be overrided.\")",
"def key_fk(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n # If the robot's IK attribute is on, switch the robot to\n # FK mode before proceeding\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_fk(robot)\n\n # We first check if the target/tool controller transformation and\n # orientation is already aligned with the FK chain. If so, it\n # indicates that we're performing an IK to FK switch, and we\n # keyframe its position and orientation directly, without\n # snapping the IK control to the FK hierarchy. This is to avoid\n # unneccessarily changing the controllers Euler Angle rotation\n # representation that can cause unpredictable behavior between frames\n\n if pm.objExists(tool_ctrl_path):\n ctrl_ik = tool_ctrl_path\n ctrl_fk = format_path(__TOOL_CTRL_FK_PATH, robot)\n\n # If robot doesn't have a tool controller, use target_CTRL.\n else:\n ctrl_ik = target_ctrl_path\n ctrl_fk = format_path(__TCP_HDL_PATH, robot)\n\n if not _ik_and_fk_aligned(ctrl_ik, ctrl_fk):\n _snap_ik_target_to_fk(robot)\n\n # Key all FK elements\n try:\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY')\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n\n # Key visibility of FK controllers\n for i in range(6):\n pm.setKeyframe(format_path(__FK_CTRLS_PATH, robot),\n attribute='visibility')\n except:\n pm.warning('Error setting FK keys in FK mode')\n\n # Key all IK elements\n try:\n pm.setKeyframe(target_ctrl_path, attribute='ik')\n pm.setKeyframe(target_ctrl_path, attribute='v', value=0)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')\n\n except:\n pm.warning('Error setting IK keys in FK mode')",
"def _ik_and_fk_aligned(ik_ctrl, tcp_handle):\n\n # Define some small number to threshold our output\n delta = .0001\n\n # Initialize variables\n # translation_is_aligned = False\n # rotation_is_aligned = False\n ik_fk_are_aligned = False\n\n # Find the translation of each object and compare them\n ik_trans = pm.xform(ik_ctrl, q=True, rp=True, ws=True)\n tcp_trans = pm.xform(tcp_handle, q=True, rp=True, ws=True)\n\n # Find the distance between the ik controller and the tcp handle\n trans_diff = math.sqrt((ik_trans[0] - tcp_trans[0]) ** 2\n + (ik_trans[1] - tcp_trans[1]) ** 2\n + (ik_trans[2] - tcp_trans[2]) ** 2)\n\n if round(trans_diff, 6) < delta:\n ik_fk_are_aligned = True\n\n return ik_fk_are_aligned",
"def make_knode(self,i,path_len=0):\n return Knode(path_len=path_len,\\\n ident=self.nodes[i].ident,\\\n lindex=i)",
"def __setattr__(self, k, v):\n if k[:1] != '_' and \\\n not k in ('dimensions', 'typecode'):\n if k not in self._ncattrs:\n self._ncattrs += (k, )\n object.__setattr__(self, k, v)",
"def setKi(self, integral_gain):\n self.__Ki = integral_gain",
"def _establish_netmiko_handler(self, opt, net_connect_dict):\n\n key = opt['ip']\n try:\n net_connect = ConnectHandler(**opt)\n except NetMikoTimeoutException as error:\n reason = error.message\n raise ValueError('[Netmiko Timeout Exception:] %s' % reason)\n except NetMikoAuthenticationException as error:\n reason = error.message\n raise ValueError('[Netmiko Authentication Exception:] %s' % reason)\n except SSHException as error:\n reason = error.message\n raise ValueError('[SSH Exception:] %s' % reason)\n except Exception as error:\n reason = error.message\n raise ValueError('Failed to connect to switch %s' % reason)\n return net_connect",
"def _add_control_channel(self, attrs):\n _cable_data = {}\n _cable_data[\"crate\"] = self._crate\n _cable_data[\"module\"] = self._module\n _cable_data[\"channel\"] = int(attrs.get('number', \"\"))\n _cable_data[\"name\"] = str(attrs.get('name', \"\"))\n self._data.append(_cable_data)",
"def _snap_ik_target_to_fk(robot):\n\n # Snap IK Ctrl to FK location\n # If robot has tool controller, use that\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n tool_ctrl_fk_path = get_tool_ctrl_fk_path(robot)\n tcp_hdl_path = format_path(__TCP_HDL_PATH, robot)\n\n if pm.objExists(tool_ctrl_path):\n ctrl_ik = tool_ctrl_path\n ctrl_fk = tool_ctrl_fk_path\n\n # If robot doesn't have a tool controller, use target_CTRL.\n else:\n ctrl_ik = target_ctrl_path\n ctrl_fk = tcp_hdl_path\n\n # Snap tool_CTRL to tool_CTRL_FK.\n try:\n pm.snapTransforms(s=ctrl_fk, d=ctrl_ik)\n except:\n pm.warning('Coundn\\'t snap {} tool_CTRL handle to FK' \\\n .format(robot))",
"def ikFkMatch(\n namespace,\n ikfk_attr,\n ui_host,\n fks,\n ik,\n upv,\n ik_rot=None,\n key=None):\n\n # returns a pymel node on the given name\n def _get_node(name):\n # type: (Text) -> pm.nodetypes.Transform\n name = anim_utils.stripNamespace(name)\n if namespace:\n node = anim_utils.getNode(\":\".join([namespace, name]))\n else:\n node = anim_utils.getNode(name)\n\n if not node:\n mgear.log(\"Can't find object : {0}\".format(name), mgear.sev_error)\n\n return node\n\n # returns matching node\n def _get_mth(name):\n # type: (str) -> pm.nodetypes.Transform\n tmp = name.split(\"_\")\n tmp[-1] = \"mth\"\n query = \"_\".join(tmp)\n n = _get_node(query)\n\n if not n:\n mgear.log(\"Can't find mth object : {0} for {1}\".format(query, name), mgear.sev_comment)\n return _get_node(name)\n else:\n return n\n\n # get things ready\n fk_ctrls = [_get_node(x) for x in fks]\n fk_goals = [_get_mth(x) for x in fks]\n ik_ctrl = _get_node(ik)\n ik_goal = _get_mth(ik)\n upv_ctrl = _get_node(upv)\n\n if ik_rot:\n ik_rot_node = _get_node(ik_rot)\n ik_rot_goal = _get_mth(ik_rot)\n\n ui_node = _get_node(ui_host)\n o_attr = ui_node.attr(ikfk_attr)\n\n switch_to_fk = (o_attr.get() == 1.0)\n switch_to_ik = (not switch_to_fk)\n\n # sets keyframes before snapping\n if key:\n _all_controls = []\n _all_controls.extend(fk_ctrls)\n _all_controls.extend([ik_ctrl, upv_ctrl, ui_node])\n if ik_rot:\n _all_controls.extend([ik_rot_node])\n [cmds.setKeyframe(\"{}\".format(elem),\n time=(cmds.currentTime(query=True) - 1.0))\n for elem in _all_controls]\n\n # if is IKw then snap FK\n if switch_to_fk:\n\n world_matrices = []\n for src, _ in zip(fk_goals, fk_ctrls):\n world_matrices.append(getMatrix(src))\n\n o_attr.set(0.0)\n\n for mat, dst in zip(world_matrices, fk_ctrls):\n setMatrix(dst, mat)\n\n for mat, dst in zip(world_matrices, fk_ctrls):\n setMatrix(dst, mat)\n\n # if is FKw then sanp IK\n elif switch_to_ik:\n\n shoulder_mat = getMatrix(fk_goals[0])\n ik_mat = getMatrix(ik_goal)\n\n # transform.matchWorldTransform(ik_goal, ik_ctrl)\n if ik_rot:\n rot_mat = getMatrix(ik_rot_goal)\n # transform.matchWorldTransform(ik_rot_goal, ik_rot_node)\n\n upv_mat = getMatrix(fk_goals[2])\n\n o_attr.set(1.0)\n\n setMatrix(ik_ctrl, ik_mat)\n setMatrix(upv_ctrl, upv_mat)\n # for _ in range(10):\n # fk_ctrls[0].setMatrix(shoulder_mat, worldSpace=True)\n\n for _ in range(20):\n cmds.xform(fk_ctrls[0].name(), ws=True, matrix=shoulder_mat)\n if ik_rot:\n setMatrix(ik_rot_node, rot_mat)\n\n # transform.matchWorldTransform(fk_goals[1], upv_ctrl)\n # calculates new pole vector position\n start_end = (fk_goals[-1].getTranslation(space=\"world\") - fk_goals[1].getTranslation(space=\"world\"))\n start_mid = (fk_goals[2].getTranslation(space=\"world\") - fk_goals[1].getTranslation(space=\"world\"))\n\n dot_p = start_mid * start_end\n proj = float(dot_p) / float(start_end.length())\n proj_vector = start_end.normal() * proj\n arrow_vector = (start_mid - proj_vector) * 1.5\n arrow_vector *= start_end.normal().length()\n final_vector = (arrow_vector + fk_goals[2].getTranslation(space=\"world\"))\n upv_ctrl.setTranslation(final_vector, space=\"world\")\n\n # sets blend attribute new value\n # o_attr.set(1.0)\n roll_att = ui_node.attr(ikfk_attr.replace(\"blend\", \"roll\"))\n roll_att.set(0.0)\n\n setMatrix(ik_ctrl, ik_mat)\n if ik_rot:\n setMatrix(ik_rot_node, rot_mat)\n # upv_ctrl.setMatrix(upv_mat, worldSpace=True)\n for _ in range(20):\n cmds.xform(fk_ctrls[0].name(), ws=True, matrix=shoulder_mat)\n\n # sets keyframes\n if key:\n 
[cmds.setKeyframe(\"{}\".format(elem),\n time=(cmds.currentTime(query=True)))\n for elem in _all_controls]",
"def setKi(self, integral_gain):\n self.Ki = integral_gain",
"def setKi(self, integral_gain):\n\t\tself.Ki = integral_gain",
"def make_control_knowledge_variables(self, horizon):\n # You might want to save your variables here, or feel free to make as\n # many data structures as you need to keep track of them.\n\n self.control_fluent_codes = {}\n\n \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n # DID NOT DEFINE ANY EXTRA VARIABLES, ALL DONE IN THE METHOD BELOW",
"def fk_to_ik(node):\n # Get relevant data\n ik_pole_off = get_parent(node.ik_pole_conn)\n\n world_trans_ik_pole_off = get_world_trans(ik_pole_off)\n world_trans_fk_01 = get_world_trans(node.fk_01_conn)\n world_trans_fk_02 = get_world_trans(node.fk_02_conn)\n world_trans_fk_03 = get_world_trans(node.fk_03_conn)\n world_trans_ik_pole = get_world_trans(node.ik_pole_conn)\n\n world_rot_fk_03 = get_world_rot(node.fk_03_conn)\n\n # calculate ik pole position\n ik_pole_mid_point = (world_trans_fk_01 + world_trans_fk_03) / 2\n ik_pole_base = world_trans_fk_02 - ik_pole_mid_point\n\n # Handle the case when the leg is fully stretched\n if ik_pole_base.length() <= 0.0001:\n rot_fk_01 = get_rot_as_quat(node.fk_01_conn)\n rot_fk_02 = get_rot_as_quat(node.fk_02_conn)\n\n rot = rot_fk_01 * rot_fk_02\n\n ik_pole_base = oMa.MVector(2 * (rot.x * rot.z + rot.w * rot.y),\n 2 * (rot.y * rot.z - rot.w * rot.x),\n 1 - 2 * (rot.x * rot.x + rot.y * rot.y))\n\n ik_pole_len = (world_trans_ik_pole - world_trans_fk_02).length()\n\n pos_ik_pole = world_trans_fk_02 + ik_pole_base.normalize() * ik_pole_len - world_trans_ik_pole_off\n\n # Get the destination MPlugs\n ik_main_trans_plugs = get_trans_plugs(node.ik_main_conn)\n ik_main_rot_plugs = get_rot_plugs(node.ik_main_conn)\n ik_pole_trans_plugs = get_trans_plugs(node.ik_pole_conn)\n\n # Set the new values\n for i, plug in enumerate(ik_main_trans_plugs):\n plug.setFloat(world_trans_fk_03[i])\n\n for i, plug in enumerate(ik_main_rot_plugs):\n plug.setMAngle(oMa.MAngle(world_rot_fk_03[i], oMa.MAngle.kRadians))\n\n for i, plug in enumerate(ik_pole_trans_plugs):\n plug.setFloat(pos_ik_pole[i])"
] | [
"0.6865341",
"0.670034",
"0.6326762",
"0.61733705",
"0.6086193",
"0.59026164",
"0.57446015",
"0.55481964",
"0.540773",
"0.5367092",
"0.53659767",
"0.51581305",
"0.5066359",
"0.493914",
"0.48905978",
"0.48687443",
"0.48397043",
"0.48023486",
"0.47860128",
"0.47808054",
"0.4765842",
"0.46816355",
"0.4661322",
"0.46589276",
"0.46124357",
"0.46079683",
"0.4583597",
"0.45763084",
"0.4567212",
"0.45632422"
] | 0.7807563 | 0 |
Create soft ik constraint on ikHandle. | def create_soft_ik(ik_ctrl, ik_joints, ik_handle):
# get name and constant variables
name = ik_handle+'Soft'
parent = utils.get_parent(ik_joints[0])
ik_handle_parent = utils.get_parent(ik_handle)
# get total length of joint chain
chain_length = 0
for jnt in ik_joints[1:]:
chain_length += abs(mc.getAttr(jnt+'.tx'))
mc.addAttr(ik_joints[0], ln='softIkChainLength', k=1, dv=chain_length)
#create dist node, (distance between top ik_joint and ik_handle) = X
soft_ik_root = utils.snap_locator(ik_joints[0], node_type='transform')
soft_ik_root = mc.rename(soft_ik_root, name+'_root_'+utils.get_suffix('transform'))
dist = utils.create_distance_reader(soft_ik_root, ik_handle_parent)
#create the dSoft and softIK attributes on the controller
mc.addAttr(ik_ctrl, ln='softIK', min=0, k=1)
ctrl_clamp = mc.createNode('clamp')
mc.connectAttr(ik_ctrl+'.softIK', ctrl_clamp+'.inputR')
mc.setAttr(ctrl_clamp+'.minR', 0.0001)
mc.setAttr(ctrl_clamp+'.maxR', 10000000)
#create node network for soft IK
da_pma = mc.createNode('plusMinusAverage', n=name+'_da_pma')
x_minus_da_pma = mc.createNode('plusMinusAverage', n=name+'_x_minus_da_pma')
negate_x_minus_md = mc.createNode('multiplyDivide', n=name+'_negate_x_minus_md')
divBy_dSoft_md = mc.createNode('multiplyDivide', n=name+'_divBy_dSoft_md')
pow_e_md = mc.createNode('multiplyDivide', n=name+'_pow_e_md')
one_minus_pow_e_pma = mc.createNode('plusMinusAverage', n=name+'_one_minus_pow_e_pma')
times_dSoft_md = mc.createNode('multiplyDivide', n=name+'_times_dSoft_md')
plus_da_pma = mc.createNode('plusMinusAverage', n=name+'_plus_da_pma')
da_cond = mc.createNode('condition', n=name+'_da_cond')
dist_diff_pma = mc.createNode('plusMinusAverage', n=name+'_dist_diff_pma')
defaultPos_pma = mc.createNode('plusMinusAverage', n=name+'_defaultPos_pma')
#set operations
mc.setAttr(da_pma+'.operation', 2)
mc.setAttr(x_minus_da_pma+'.operation', 2)
mc.setAttr(negate_x_minus_md+'.operation', 1)
mc.setAttr(divBy_dSoft_md+'.operation', 2)
mc.setAttr(pow_e_md+'.operation', 3)
mc.setAttr(one_minus_pow_e_pma+'.operation', 2)
mc.setAttr(times_dSoft_md+'.operation', 1)
mc.setAttr(plus_da_pma+'.operation', 1)
mc.setAttr(da_cond+'.operation', 5)
mc.setAttr(dist_diff_pma+'.operation', 2)
mc.setAttr(defaultPos_pma+'.operation', 2)
#make connections
mc.connectAttr(ik_joints[0]+'.softIkChainLength', da_pma+'.input1D[0]')
mc.connectAttr(ctrl_clamp+'.outputR', da_pma+'.input1D[1]')
mc.connectAttr(dist+'.localDistance', x_minus_da_pma+'.input1D[0]')
mc.connectAttr(da_pma+'.output1D', x_minus_da_pma+'.input1D[1]')
mc.connectAttr(x_minus_da_pma+'.output1D', negate_x_minus_md+'.input1X')
mc.setAttr(negate_x_minus_md+'.input2X', -1)
mc.connectAttr(negate_x_minus_md+'.outputX', divBy_dSoft_md+'.input1X')
mc.connectAttr(ctrl_clamp+'.outputR', divBy_dSoft_md+'.input2X')
mc.setAttr(pow_e_md+'.input1X', 2.718281828)
mc.connectAttr(divBy_dSoft_md+'.outputX', pow_e_md+'.input2X')
mc.setAttr(one_minus_pow_e_pma+'.input1D[0]', 1)
mc.connectAttr(pow_e_md+'.outputX' , one_minus_pow_e_pma+'.input1D[1]')
mc.connectAttr(one_minus_pow_e_pma+'.output1D', times_dSoft_md+'.input1X')
mc.connectAttr(ctrl_clamp+'.outputR', times_dSoft_md+'.input2X')
mc.connectAttr(times_dSoft_md+'.outputX', plus_da_pma+'.input1D[0]')
mc.connectAttr(da_pma+'.output1D', plus_da_pma+'.input1D[1]')
mc.connectAttr(da_pma+'.output1D', da_cond+'.firstTerm')
mc.connectAttr(dist+'.localDistance', da_cond+'.secondTerm')
mc.connectAttr(dist+'.localDistance', da_cond+'.colorIfFalseR')
mc.connectAttr(plus_da_pma+'.output1D', da_cond+'.colorIfTrueR')
mc.connectAttr(da_cond+'.outColorR', dist_diff_pma+'.input1D[0]')
mc.connectAttr(dist+'.localDistance', dist_diff_pma+'.input1D[1]')
mc.setAttr(defaultPos_pma+'.input1D[0]', 0)
mc.connectAttr(dist_diff_pma+'.output1D', defaultPos_pma+'.input1D[1]')
# Create new ik aim node
up = [1,0,0]
aim = [0,1,0]
grp = mc.createNode('transform', n=name+'_soft_aim_'+utils.get_suffix('transform'), p=ik_handle_parent)
gAim = mc.createNode('transform', n=name+'_soft_'+utils.get_suffix('transform'), p=grp)
mc.aimConstraint(soft_ik_root,
grp,
aim=aim,
u=up,
wu=up,
wut='objectRotation',
wuo=ik_ctrl,
n=grp+'_ac')
mc.connectAttr(defaultPos_pma+'.output1D', gAim+'.ty')
mc.pointConstraint(gAim, ik_handle)
mc.parent(ik_handle, gAim)
# parent stuff
if parent:
mc.parent(soft_ik_root, parent)
return gAim | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_keep_in_constraint(self,der=2,limit=1e1,weight=1e5):\n print(\"Creating Keep in constraint\")\n constr = dict()\n constr['constraint_type'] = \"ellipsoid\"\n constr['weight'] = self.accel_weight\n constr['keep_out'] = False\n constr['der'] = der\n constr['x0'] = np.zeros(3)\n A = np.matrix(np.identity(3))\n limit = self.accel_lim\n A[0,0] = 1/limit**2\n A[1,1] = 1/limit**2\n A[2,2] = 1/limit**2\n constr['rot_mat'] = np.identity(3)\n constr['A'] = A\n\n\n self.qr_polytraj.add_constraint(constr['constraint_type'],constr,dynamic_weighting=False,sum_func=False)\n\n # self.qr_polytraj.run_astro()\n # self.update_path_markers()\n # acc_wp = self.get_accel_at_waypoints(\"main\")\n # self.interactive_marker_worker.make_controls(self.qr_polytraj.waypoints)\n # self.interactive_marker_worker.update_controls(self.qr_polytraj.waypoints,acc_wp = acc_wp)",
"def create_fk_ik_switch(switch_ctrl, ik_handles, fk_ctrls, ik_ctrls, vis_ctrl=None, switch_attr_name='IK', vis_attr_name='fkIkCtrlVis'):\n\n fk_ctrls = mc.ls(fk_ctrls)\n ik_ctrls = mc.ls(ik_ctrls)\n ik_handles = mc.ls(ik_handles)\n\n if not vis_ctrl:\n vis_ctrl = switch_ctrl\n\n # Create attributes\n if not mc.objExists(switch_ctrl+'.'+switch_attr_name):\n mc.addAttr(switch_ctrl, ln=switch_attr_name, min=0, max=1, k=1)\n\n if not mc.objExists(vis_ctrl+'.'+vis_attr_name):\n mc.addAttr(vis_ctrl, ln=vis_attr_name, at='enum', en='auto:fkOnly:ikOnly:both', k=1)\n\n # Connect ik handles\n for handle in ik_handles:\n mc.connectAttr(switch_ctrl+'.'+switch_attr_name, handle+'.ikBlend')\n\n # Create swicth for ik ctrl\n ik_choice = utils.create_node('choice', n=vis_attr_name+'_ik_choice')\n mc.connectAttr(vis_ctrl+'.'+vis_attr_name, ik_choice+'.selector')\n mc.connectAttr(switch_ctrl+'.'+switch_attr_name, ik_choice+'.input[0]')\n mc.setAttr(ik_choice+'.input[1]', 0)\n mc.setAttr(ik_choice+'.input[2]', 1)\n mc.setAttr(ik_choice+'.input[3]', 1)\n\n for ctrl in ik_ctrls:\n mc.setAttr(ctrl+'.v', l=0)\n mc.connectAttr(ik_choice+'.output', ctrl+'.v', f=1)\n mc.setAttr(ctrl+'.v', l=1)\n\n # Create swicth for ik ctrl\n fk_choice = utils.create_node('choice', n=vis_attr_name+'_fk_choice')\n fk_rv = utils.create_node('reverse', n=vis_attr_name+'_fk_choice')\n mc.connectAttr(switch_ctrl+'.'+switch_attr_name, fk_rv+'.inputX')\n mc.connectAttr(vis_ctrl+'.'+vis_attr_name, fk_choice+'.selector')\n mc.connectAttr(fk_rv+'.outputX', fk_choice+'.input[0]')\n mc.setAttr(fk_choice+'.input[1]', 1)\n mc.setAttr(fk_choice+'.input[2]', 0)\n mc.setAttr(fk_choice+'.input[3]', 1)\n\n for ctrl in fk_ctrls:\n mc.setAttr(ctrl+'.v', l=0)\n mc.connectAttr(fk_choice+'.output', ctrl+'.v', f=1)\n mc.setAttr(ctrl+'.v', l=1)\n\n return True",
"def createConstraint(self):\n return _libsbml.Model_createConstraint(self)",
"def createConstraint(*argv):",
"def _set_constraint(self):\n pass",
"def _parse_initbound(self) :\n\t\tlogging.debug(\"Parsing initbound soft constraints\")",
"def getCrossFormedGraphConstraintsPreventAnySwitch(self):\n makeLayer = self.makeLayer\n addNodeToLayer = self.addNodeToLayer\n eastWestEdgeFromTo = self.eastWestEdgeFromTo\n graph = self.graph\n setInLayerOrderConstraint = self.setInLayerOrderConstraint\n\n leftLayer = makeLayer(graph)\n rightLayer = makeLayer(graph)\n\n topLeft = addNodeToLayer(leftLayer)\n bottomLeft = addNodeToLayer(leftLayer)\n topRight = addNodeToLayer(rightLayer)\n bottomRight = addNodeToLayer(rightLayer)\n\n eastWestEdgeFromTo(topLeft, bottomRight)\n eastWestEdgeFromTo(bottomLeft, topRight)\n setInLayerOrderConstraint(topRight, bottomRight)\n setInLayerOrderConstraint(topLeft, bottomLeft)\n\n return graph",
"def soft_constraint ( self , var , value , name = '' , title = '' ) :\n \n assert isinstance ( var , ROOT.RooAbsReal ) ,\\\n \"Invalid ``v'': %s/%s\" % ( var , type ( var ) ) \n assert isinstance ( value , VE ),\\\n \"Invalid ``value'': %s/%s\" % ( value , type ( value ) )\n\n assert 0 < value.cov2() , 'Invalid error for %s' % value\n \n name = name if name else 'Gauss_%s_%s' % ( var.GetName() , self.name ) \n title = title if title else 'Gaussian Constraint(%s,%s) at %s' % ( var.GetName() , self.name , value )\n \n # value & error as RooFit objects: \n val = ROOT.RooFit.RooConst ( value.value () )\n err = ROOT.RooFit.RooConst ( value.error () )\n \n # Gaussian constrains \n gauss = ROOT.RooGaussian ( self.var_name ( name ) , title , var , val , err )\n \n # keep all the created technical stuff \n self.aux_keep.append ( val )\n self.aux_keep.append ( err )\n self.aux_keep.append ( gauss )\n\n self.info ('Constraint is created %s=%s' % ( var.name , value ) )\n return gauss",
"def _create_hardsigmoid(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n\n node.attribute.extend([\n helper.make_attribute('alpha', op.alpha),\n helper.make_attribute('beta', op.gamma),\n ])\n return node",
"def declare_physical_budget(model, k):\n\n m = model\n\n m.budget = pe.Constraint(expr=sum(1*m.delta_gen[g] for g in m.delta_gen.index_set()) +\\\n sum(1*m.delta_branch[k] for k in m.delta_branch.index_set()) +\\\n sum(1*m.delta_load[b] for b in m.delta_load.index_set()) +\\\n sum(1*m.delta_bus[b] for b in m.delta_bus.index_set()) == k)",
"def test_create_hyperflex_auto_support_policy(self):\n pass",
"def declare_physical_budget(model, k):\n\n m = model\n\n m.budget = pe.Constraint(expr=sum(5*m.delta_gen[g] for g in m.delta_gen.index_set()) +\\\n sum(1*m.delta_branch[k] for k in m.delta_branch.index_set()) +\\\n sum(3*m.delta_load[b] for b in m.delta_load.index_set()) +\\\n sum(1*m.delta_bus[b] for b in m.delta_bus.index_set()) == k)",
"def _create_hardsigmoid(cls, onnx_node, inputs, opset_version):\n alpha = onnx_node.getattr(\"alpha\", 0.2)\n beta = onnx_node.getattr(\"beta\", 0.5)\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(alpha, beta)",
"def switch_to_ik(robot):\n\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n try:\n # Turn FK control visibility off\n pm.setAttr(fk_ctrls_path + '.v', 0)\n\n # Turn IK control visibility on\n pm.setAttr(target_ctrl_path + '.v', 1)\n pm.setAttr(format_path(__TARGET_CTRL_PATH + '|{1}target_CTRLShape',\n robot) + '.visibility', 1)\n\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.v'.format(robot), 1)\n except:\n # These aren't crucial to the switch as they're just visual, and \n # a connection or locking of any of these attributes might throw\n # an error, so let's just skip it\n pass\n \n try:\n # Snap IK Ctrl to FK location\n _snap_ik_target_to_fk(robot)\n except:\n raise MimicError('Error swithching to IK; could not snap IK CTRL to FK')\n\n ## Find closest IK configuration to current FK pose ##\n # Get FK config and all IK solutions\n ik_sols = find_ik_solutions(robot)\n fk_config = find_fk_config(robot)\n\n # Remove all MFG-specific offsets from the FK config\n solver_params = get_solver_params(robot)\n axis_offsets = solver_params.axis_offsets\n rot_directions = solver_params.rot_directions\n fk_config_norm = _normalize_fk_pose(fk_config, axis_offsets, rot_directions)\n\n ## TO-DO: account for FK config rotations above and below 180 degrees\n # Select the closes IK configuration to the given FK config\n ik_config = find_closest_config(fk_config_norm, ik_sols)\n\n # Match IK config to FK pose\n pm.setAttr(target_ctrl_path + '.ikSolution1', ik_config[0])\n pm.setAttr(target_ctrl_path + '.ikSolution2', ik_config[1])\n pm.setAttr(target_ctrl_path + '.ikSolution3', ik_config[2])\n\n # turn ik solve back on\n pm.setAttr(target_ctrl_path + '.ik', 1)",
"def createConstraint(schemaName, tableName, constraint):\n return constraints[constraint.kind](schemaName, tableName, constraint)",
"def create_sticky(ctx, iface, resource_config, **_):\n\n # Create a copy of the resource config for clean manipulation.\n params = \\\n dict() if not resource_config else resource_config.copy()\n\n lb_name = params.get(LB_NAME)\n policy_name = params.get(RESOURCE_NAME)\n\n if not lb_name:\n targs = \\\n utils.find_rels_by_node_type(\n ctx.instance,\n LB_TYPE)\n lb_name = \\\n targs[0].target.instance.runtime_properties[\n EXTERNAL_RESOURCE_ID]\n params.update({LB_NAME: lb_name})\n\n ctx.instance.runtime_properties[LB_NAME] = \\\n lb_name\n ctx.instance.runtime_properties[RESOURCE_NAME] = \\\n policy_name\n\n # Actually create the resource\n iface.create_sticky(params)",
"def ikHandle(*args, autoPriority: bool=True, connectEffector: bool=True, createCurve: bool=True,\n createRootAxis: bool=True, curve: Union[name, bool]=None, disableHandles:\n bool=True, enableHandles: bool=True, endEffector: Union[AnyStr, bool]=\"\", exists:\n AnyStr=\"\", forceSolver: bool=True, freezeJoints: bool=True, jointList: bool=True,\n name: Union[AnyStr, bool]=\"\", numSpans: int=0, parentCurve: bool=True,\n positionWeight: Union[float, bool]=0.0, priority: Union[int, bool]=0, rootOnCurve:\n bool=True, rootTwistMode: bool=True, setupForRPsolver: bool=True, simplifyCurve:\n bool=True, snapCurve: bool=True, snapHandleFlagToggle: bool=True,\n snapHandleToEffector: bool=True, solver: Union[AnyStr, bool]=\"\", startJoint:\n Union[AnyStr, bool]=\"\", sticky: Union[AnyStr, bool]=\"\", twistType: Union[AnyStr,\n bool]=\"\", weight: Union[float, bool]=0.0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass",
"def convert_softshrink(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n threshold = _expr.const(op.attr(\"lambda\"), dtype=dtype)\n zeros = _op.zeros_like(x)\n out = _op.where(x < -threshold, x + threshold, zeros) + _op.where(\n x > threshold, x - threshold, zeros\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def constraints(self, x):\n pass",
"def make_constraint(constraint):\n if isinstance(constraint, str) and constraint == \"array-like\":\n return _ArrayLikes()\n if isinstance(constraint, str) and constraint == \"sparse matrix\":\n return _SparseMatrices()\n if isinstance(constraint, str) and constraint == \"random_state\":\n return _RandomStates()\n if constraint is callable:\n return _Callables()\n if constraint is None:\n return _NoneConstraint()\n if isinstance(constraint, type):\n return _InstancesOf(constraint)\n if isinstance(\n constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)\n ):\n return constraint\n if isinstance(constraint, str) and constraint == \"boolean\":\n return _Booleans()\n if isinstance(constraint, str) and constraint == \"verbose\":\n return _VerboseHelper()\n if isinstance(constraint, str) and constraint == \"cv_object\":\n return _CVObjects()\n if isinstance(constraint, Hidden):\n constraint = make_constraint(constraint.constraint)\n constraint.hidden = True\n return constraint\n raise ValueError(f\"Unknown constraint type: {constraint}\")",
"def spline_ik(self):\n ikHandle, ikEffector, ikCurve = pm.ikHandle(\n name=self.name + \"_ikh\",\n startJoint=self.joints[0],\n endEffector=self.joints[-1],\n solver='ikSplineSolver',\n simplifyCurve=False\n )\n\n # Get the number of digits so we can set the zfill correctly,\n digits = len(str(len(ikCurve.cv)))\n\n # Iterate over each cv and create a cluster deformer,\n for i, cv in enumerate(ikCurve.cv):\n cluster_node, cluster_handle = pm.cluster(cv)\n cluster_handle.rename(\n ikCurve.nodeName() + '_ch_{}'.format(str(i).zfill(digits))\n )",
"def create_soft_block_at(self, x, y):\n cell_size = self.map.get_cell_size()\n obj = SoftBlock(\n parent=self.map,\n style={\n 'width': cell_size, \n 'height': cell_size * 2, \n 'z-index': layers['object'] }\n )\n # I am a soft block\n block(obj)\n # Randomly put an item after I dead\n make_breakable(self, obj, \n on_die=lambda: self.put_item_random(x, y))\n\n self.map.add_node(obj, x, y, 0, -cell_size)\n return obj",
"def _constraints_external(self):\n pass",
"def make_constraint ( self , var , value , name = '' , title = '' ) :\n \n ## create the gaussian constraint\n gauss = self.soft_constraint ( var , value , name , title ) \n \n cnts = ROOT.RooArgSet ( gauss )\n \n result = ROOT.RooFit.ExternalConstraints ( cnts )\n \n self.aux_keep.append ( cnts )\n \n return result",
"def make_untrainable(circuit, weights_initialized):\n\n def circuit_var(weights):\n circuit(weights_initialized)\n\n return circuit_var",
"def test_get_hyperflex_auto_support_policy_by_moid(self):\n pass",
"def constraints(self):\n ...",
"def constraint_level(self, soft_constraint):\n return soft_constraint.is_soft, len(soft_constraint.get_variables())",
"def SetConstraint(self, model) :\n if 'pp' in self.__type : self.SetPPConstraint( model )\n elif self.__type == 'prBin' and self.bound!=0 : self.SetPRBinConstraint( model )\n elif self.__type == 'prCat' and self.bound != 0 : self.SetPRCatConstraint(model)\n elif self.__type == 'prBinCat' and self.bound != 0 : self.SetPRBinCatConstraint(model)\n elif self.bound == 0 : return\n else : raise RuntimeError( 'SetConstraint : Unknown type for Constraint : ', self.__type )",
"def _set_restricted_policy(environ, bag):\n username = environ['tiddlyweb.usersign']['name']\n if username == 'GUEST':\n return\n bag.policy.owner = username\n # accept does not matter here\n for constraint in ['read', 'write', 'create', 'delete', 'manage']:\n setattr(bag.policy, constraint, [username])\n return"
] | [
"0.5469389",
"0.531737",
"0.52888745",
"0.51781154",
"0.50092465",
"0.49953595",
"0.49701515",
"0.4952075",
"0.49448365",
"0.49093467",
"0.49027547",
"0.4884955",
"0.4834274",
"0.47601178",
"0.47214177",
"0.46724012",
"0.4648716",
"0.46426857",
"0.4633451",
"0.46300042",
"0.46296412",
"0.4621398",
"0.46171993",
"0.45797682",
"0.45698234",
"0.4551361",
"0.4544794",
"0.45422685",
"0.45376104",
"0.45184177"
] | 0.7518777 | 0 |
Quaternion / matrix based twist for upper arms and legs. | def upper_twist(shoulder_jnt, up_arm_ik_jnt, lo_arm_ik_jnt, up_arm_jnt, lo_arm_jnt, up_arm_twist_jnts):
# Create a group that does not rotate and parent under the ik arm parent (shoulder)
stable_reader_grp = utils.create_node('transform', n=up_arm_ik_jnt+'_stable_reader', p=up_arm_ik_jnt)
# Create a grp that will rotate with ik arm
twist_reader_grp = utils.create_node('transform', n=up_arm_ik_jnt+'_twist_reader', p=up_arm_ik_jnt)
twist_driver_grp = utils.create_node('transform', n=up_arm_ik_jnt+'_twist', p=twist_reader_grp)
mc.parent(stable_reader_grp, shoulder_jnt)
mc.addAttr(twist_reader_grp, ln='twist', k=1)
# Now set up mult matrix and decomp nodes to extract the twist between the two nodes
mult_mtx = mc.createNode('multMatrix')
decomp_mtx = mc.createNode('decomposeMatrix')
quat_to_euler = mc.createNode('quatToEuler')
mc.connectAttr(stable_reader_grp+'.worldInverseMatrix', mult_mtx+'.matrixIn[1]')
mc.connectAttr(twist_reader_grp+'.worldMatrix', mult_mtx+'.matrixIn[0]')
mc.connectAttr(mult_mtx+'.matrixSum', decomp_mtx+'.inputMatrix')
mc.connectAttr(decomp_mtx+'.outputQuatX', quat_to_euler+'.inputQuatX')
mc.connectAttr(decomp_mtx+'.outputQuatW', quat_to_euler+'.inputQuatW')
utils.connect_negative(quat_to_euler+'.outputRotateX', twist_reader_grp+'.twist')
mc.connectAttr(twist_reader_grp+'.twist', twist_driver_grp+'.rx')
# Connect joints
mc.parentConstraint(twist_driver_grp, up_arm_jnt, mo=1)
mc.parentConstraint(lo_arm_ik_jnt, lo_arm_jnt, mo=1)
div = 1.0 / (len(up_arm_twist_jnts))
mdl = mc.createNode('multDoubleLinear')
mc.setAttr(mdl+'.input1', div)
mc.connectAttr(quat_to_euler+'.outputRotateX', mdl+'.input2')
for i, joint in enumerate(up_arm_twist_jnts[:-1]):
mc.connectAttr(mdl+'.output', joint+'.rx')
mc.orientConstraint(up_arm_ik_jnt, up_arm_twist_jnts[-1], mo=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lower_twist(lo_arm_ik_jnt, wrist_ik_jnt, lo_arm_jnt, lo_arm_twist_jnts, wrist_jnt=None):\n\n # Create a group that does not rotate and parent under the ik arm parent (shoulder)\n stable_reader_grp = utils.create_node('transform', n=lo_arm_ik_jnt+'_stable_reader', p=lo_arm_ik_jnt)\n\n # Create a grp that will rotate with ik arm\n twist_reader_grp = utils.create_node('transform', n=lo_arm_ik_jnt+'_twist_reader', p=lo_arm_ik_jnt)\n mc.addAttr(twist_reader_grp, ln='twist', k=1)\n\n mc.delete(mc.pointConstraint(wrist_ik_jnt, twist_reader_grp))\n mc.parent(twist_reader_grp, wrist_ik_jnt)\n\n # Now set up mult matrix and decomp nodes to extract the twist between the two nodes\n mult_mtx = mc.createNode('multMatrix')\n decomp_mtx = mc.createNode('decomposeMatrix')\n quat_to_euler = mc.createNode('quatToEuler')\n\n mc.connectAttr(stable_reader_grp+'.worldInverseMatrix', mult_mtx+'.matrixIn[1]')\n mc.connectAttr(twist_reader_grp+'.worldMatrix', mult_mtx+'.matrixIn[0]')\n mc.connectAttr(mult_mtx+'.matrixSum', decomp_mtx+'.inputMatrix')\n mc.connectAttr(decomp_mtx+'.outputQuatX', quat_to_euler+'.inputQuatX')\n mc.connectAttr(decomp_mtx+'.outputQuatW', quat_to_euler+'.inputQuatW')\n\n utils.connect_negative(quat_to_euler+'.outputRotateX', twist_reader_grp+'.twist')\n\n # Connect joints\n mc.parentConstraint(lo_arm_ik_jnt, lo_arm_jnt, mo=1)\n if wrist_jnt:\n mc.parentConstraint(wrist_ik_jnt, wrist_jnt, mo=1)\n\n div = 1.0 / (len(lo_arm_twist_jnts))\n\n mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(mdl+'.input1', div)\n mc.connectAttr(quat_to_euler+'.outputRotateX', mdl+'.input2')\n\n for i, joint in enumerate(lo_arm_twist_jnts):\n mc.connectAttr(mdl+'.output', joint+'.rx')",
"def Areml_eigh(self):\n s,U = LA.eigh(self.Areml(),lower=True)\n i_pos = (s>1e-10)\n s = s[i_pos]\n U = U[:,i_pos]\n return s,U",
"def lfunc(x,u):\n return mpc.mtimes(u.T, R, u) + mpc.mtimes((x-goal).T, Q, (x-goal))",
"def other_quadrants(self, matrix):\n q2 = deepcopy(matrix)\n q2t = [-1, 1]\n q2f = []\n for j in range(len(q2)):\n list = [q2[j][i] * q2t[i] for i in range(2)]\n dist = self.get_dist(list[0], list[1])\n\n if dist <= self.max_distance:\n list.append(matrix[j][2])\n q2f.append(list)\n\n q3 = deepcopy(matrix)\n q3t = [-1, -1]\n q3f = []\n for j in range(len(q3)):\n list = [q3[j][i] * q3t[i] for i in range(2)]\n dist = self.get_dist(list[0], list[1])\n\n if dist <= self.max_distance:\n list.append(matrix[j][2])\n q3f.append(list)\n\n q4 = deepcopy(matrix)\n q4t = [1, -1]\n q4f = []\n for j in range(len(q3)):\n list = [q4[j][i] * q4t[i] for i in range(2)]\n dist = self.get_dist(list[0], list[1])\n\n if dist <= self.max_distance:\n list.append(matrix[j][2])\n q4f.append(list)\n\n return q2f, q3f, q4f",
"def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb",
"def lap_mat(self):",
"def transform_traj(traj, ego_HTM, th):\r\n traj_pos = np.concatenate([traj[:, :2], np.ones([len(traj), 1])], axis=1)\r\n traj_pos = np.matmul(ego_HTM, traj_pos.transpose()).transpose()\r\n traj_th = traj[:, 2] - th\r\n traj_pos[:, 2] = traj_th\r\n return traj_pos",
"def fkine(robot, q):\n\n q = mat(q)\n n = robot.n\n if numrows(q)==1 and numcols(q)==n:\n t = robot.base\n for i in range(0,n):\n t = t * robot.links[i].tr(q[0,i])\n t = t * robot.tool\n return t\n else:\n if numcols(q) != n:\n raise Exception('bad data')\n t = []\n for qv in q: # for each trajectory point\n tt = robot.base\n for i in range(0,n):\n tt = tt * robot.links[i].tr(qv[0,i])\n t.append(tt*robot.tool)\n return t",
"def Phieqfun(Phibar,DPhieq,lambdas,mus,I,J,g):\n\n PhieqMat=Phibar*np.ones((J,I)) #initialize to flat nightside geopotentiAL\n \n for i in range(I):\n for j in range(J):\n #assume substellar point is (0,0)\n if -np.pi/2<lambdas[i]<np.pi/2: #only force the dayside\n PhieqMat[j,i]=PhieqMat[j,i]+DPhieq*np.cos(lambdas[i])*np.sqrt((1-mus[j]**2)) \n \n return PhieqMat",
"def inv_kin(self, xy):\n\n def distance_to_default(q, *args): \n \"\"\"Objective function to minimize\n Calculates the euclidean distance through joint space to the default\n arm configuration. The weight list allows the penalty of each joint \n being away from the resting position to be scaled differently, such\n that the arm tries to stay closer to resting state more for higher \n weighted joints than those with a lower weight.\n \n :param list q: the list of current joint angles\n :returns scalar: euclidean distance to the default arm position\n \"\"\"\n # weights found with trial and error, get some wrist bend, but not much\n weight = [1, 1, 1.3, 1] \n return np.sqrt(np.sum([(qi - q0i)**2 * wi\n for qi,q0i,wi in zip(q, self.q0, weight)]))\n\n def x_constraint(q, xy):\n \"\"\"Returns the corresponding hand xy coordinates for \n a given set of joint angle values [shoulder, elbow, wrist], \n and the above defined arm segment lengths, L\n \n :param list q: the list of current joint angles\n :returns: the difference between current and desired x position\n \"\"\"\n x = ( self.L[0]*np.cos(q[0]) + self.L[1]*np.cos(q[0]+q[1]) + \n self.L[2]*np.cos(q[0]+q[1]+q[2]) + self.L[3]*np.cos(np.sum(q)) ) - xy[0]\n return x\n\n def y_constraint(q, xy): \n \"\"\"Returns the corresponding hand xy coordinates for \n a given set of joint angle values [shoulder, elbow, wrist], \n and the above defined arm segment lengths, L\n \n :param list q: the list of current joint angles\n :returns: the difference between current and desired y position\n \"\"\"\n y = ( self.L[0]*np.sin(q[0]) + self.L[1]*np.sin(q[0]+q[1]) + \n self.L[2]*np.sin(q[0]+q[1]+q[2]) + self.L[3]*np.sin(np.sum(q)) ) - xy[1]\n return y\n\n return scipy.optimize.fmin_slsqp( func=distance_to_default, \n x0=self.q, eqcons=[x_constraint, y_constraint], \n args=(xy,), iprint=0) # iprint=0 suppresses output",
"def lie_bracket(self, matrix_a, matrix_b):\n return gs.matmul(matrix_a, matrix_b) - gs.matmul(matrix_b, matrix_a)",
"def reverse_quad(q):\n return [q[1], q[0], q[3], q[2]]",
"def needwu(A,B,S,d):\n AlignementA=\"\"\n AlignementB=\"\"\n F=matriF(A,B,S,d)\n i=len(A)-1\n j=len(B)-1\n\n while i>0 and j>0:\n score=F[i][j]\n scorediag=F[i-1][j-1]\n scoreup=F[i][j-1]\n scoreleft=F[i-1][j]\n\n if score==(scorediag+S[ind(A[i])][ind(B[j])]):\n AlignementA=A[i]+AlignementA\n AlignementB=B[j]+AlignementB\n i=i-1\n j=j-1\n\n elif score==(scoreleft+d):\n AlignementA=A[i]+AlignementA\n AlignementB=\"-\"+AlignementB\n i=i-1\n\n elif score==(scoreup+d):\n AlignementA=\"-\"+AlignementA\n AlignementB=B[j]+AlignementB\n j=j-1\n\n while i>0:\n AlignementA=A[i]+AlignementA\n AlignementB=\"-\"+AlignementB\n i=i-1\n\n while j>0:\n AlignementA=\"-\"+AlignementA\n AlignementB=B[j]+AlignementB\n j=j-1\n\n return AlignementA, AlignementB",
"def test_quaternion_hamilton():\n q_ij = pr.concatenate_quaternions(pr.q_i, pr.q_j)\n assert_array_equal(pr.q_k, q_ij)\n q_ijk = pr.concatenate_quaternions(q_ij, pr.q_k)\n assert_array_equal(-pr.q_id, q_ijk)",
"def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here",
"def Rt(X):\n return X[:2,:2], X[:2, 2]",
"def housetriang(A, B):\n m, n = shape(A); r = shape(B)[1] ; A=hstack([A,B]); \n minval = array([n, m - 1]).min()\n for k in range(minval):\n v, A[k, k] = housegen(A[k:m, k])\n v = matrix(reshape(v, (m - k, 1)))\n C = A[k:m, (k+1):(n+r)] ; A[k:m, (k + 1):(n + r)] = C - v*(v.T*C)\n R = triu(A[:, :n]); C = A[:, n:(n + r)]\n return R, C",
"def __rmul__(self, other):#标量乘法\n if isinstance(other, numbers.Number):\n pass\n # \n # TODO - your code here\n #\n result = [];\n row_result = [];\n \n for row in self.g:\n row_result = [m*other for m in row];\n result.append(row_result);\n return Matrix(result);",
"def getQ(m, t):\n\n Q = []\n for r in range(len(t)):\n qrow = []\n for c in range(len(t)):\n qrow.append(m[t[r]][t[c]])\n Q.append(qrow) \n return Q",
"def _transform_coordinates(rectangle, Q=np.matrix(((1, 1), (-1, 1)))):\n return tuple((rectangle[0]*Q).A1), tuple((rectangle[1]*Q).A1)",
"def quat2transform(q):\n x, y, z, w = q\n xx2 = 2 * x * x\n yy2 = 2 * y * y\n zz2 = 2 * z * z\n xy2 = 2 * x * y\n wz2 = 2 * w * z\n zx2 = 2 * z * x\n wy2 = 2 * w * y\n yz2 = 2 * y * z\n wx2 = 2 * w * x\n\n rmat = np.empty((3, 3), float)\n rmat[0,0] = 1. - yy2 - zz2\n rmat[0,1] = xy2 - wz2\n rmat[0,2] = zx2 + wy2\n rmat[1,0] = xy2 + wz2\n rmat[1,1] = 1. - xx2 - zz2\n rmat[1,2] = yz2 - wx2\n rmat[2,0] = zx2 - wy2\n rmat[2,1] = yz2 + wx2\n rmat[2,2] = 1. - xx2 - yy2\n\n return rmat",
"def sqrtw():\n return Operator([[(1.+1.j)/2,-1.j/np.sqrt(2)],[1./np.sqrt(2),(1.+1.j)/2]])",
"def transverse_resonator(Rs, Q, wr, w):\n Rs = _np.array(Rs,ndmin=1,dtype=float)[:,None] # I am using broadcasting\n Q = _np.array(Q, ndmin=1,dtype=float)[:,None]\n wr = _np.array(wr,ndmin=1,dtype=float)[:,None]\n Zt = wr*Rs/(w + 1j*Q*(wr - w**2/wr))\n return Zt.sum(0).flatten()",
"def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)",
"def leg_tr(tr_leg, tens_list, legs_list, ent_list):\n q_index = [legs.__contains__(tr_leg) for legs in legs_list].index(True,0)\n ax1 = legs_list[q_index].index(tr_leg,0)\n ax2 = legs_list[q_index].index(tr_leg,ax1+1)\n tens_list[q_index] = np.trace(tens_list[q_index], offset=0, axis1=ax1, axis2=ax2)\n legs_list[q_index].remove(tr_leg)\n legs_list[q_index].remove(tr_leg)\n ent_list[tr_leg] = np.array([0])",
"def trace(q_1: Qs) -> Qs:\n\n if q_1.rows != q_1.columns:\n raise ValueError(f\"Oops, not a square quaternion series: {q_1.rows}/{q_1.columns}\")\n\n else:\n tr = q_1.qs[0]\n\n for i in range(1, q_1.rows):\n tr = add(tr, q_1.qs[i * (q_1.rows + 1)])\n\n return Qs([tr])",
"def relTrace(mat, spinorsize):\n\n top = mat[:spinorsize, :spinorsize]\n bottom = mat[spinorsize:, spinorsize:]\n return 2*(top+bottom)",
"def gram_schmidt(mat_a):\n # NOTE: We will use the same variable names as the one in the\n # pseudo code for clarity\n rows_count = mat_a.shape[0]\n\n u = mat_a.copy()\n r = np.zeros_like(u)\n q = np.zeros_like(u)\n for i in range(rows_count):\n u_i = u[:, i]\n r[i, i] = np.linalg.norm(u_i)\n q[:, i] = u_i / r[i, i] if r[i, i] != 0 else 0\n q_i = q[:, i]\n\n r[i, i + 1:] = q_i.T.dot(u[:, i + 1:])\n # np.outer will multiply q_i by each number in r[i, i + 1:], and create\n # a matrix that each column is a result of that multiplication\n u[:, i + 1:] -= np.outer(q_i, r[i, i + 1:])\n\n return q, r",
"def upper_tri(A):\n d = pairwise_distances(A,A)\n m = d.shape[0]\n r,c = np.triu_indices(m,1)\n return(d[r,c])",
"def calc_quad(self,mw,A0,A1,A2): \n return (A0 + A1 * mw + A2 * mw**2)"
] | [
"0.5514963",
"0.52410305",
"0.5185732",
"0.51259094",
"0.5053267",
"0.5020422",
"0.49390778",
"0.4879556",
"0.48793352",
"0.4878152",
"0.4855",
"0.4854443",
"0.48542276",
"0.48520848",
"0.48423955",
"0.4821107",
"0.4791083",
"0.47887972",
"0.47870728",
"0.47847036",
"0.47841114",
"0.4783863",
"0.47800145",
"0.47678885",
"0.4763263",
"0.4752754",
"0.47518486",
"0.47462255",
"0.47438517",
"0.47342122"
] | 0.56926125 | 0 |
Stretch setup for biped (2 joint chain) arms and legs | def biped_stretch(ik_ctrl,
ik_last_node,
pv_ctrl,
switch_ctrl,
up_arm_fk_ctrl,
lo_arm_fk_ctrl,
wrist_fk_ctrl,
up_arm_ik_jnt,
lo_arm_ik_jnt,
wrist_ik_jnt,
ik_handle,
pin_attr_name='pinElbow',
shift_attr_name='shiftElbow'):
# add all my attrs on ctrls
mc.addAttr(ik_ctrl, ln=pin_attr_name, at='double', min=0, max=1, k=1)
mc.addAttr(ik_ctrl, ln=shift_attr_name, at='double', min=-1, max=1, k=1)
mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)
mc.addAttr(ik_ctrl, ln='upStretch', at='double', dv=1, min=0.001, k=1)
mc.addAttr(ik_ctrl, ln='loStretch', at='double', dv=1, min=0.001, k=1)
mc.addAttr(up_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)
mc.addAttr(lo_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)
# store initial length of joint
lo_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')
wrist_init_length = mc.getAttr(wrist_ik_jnt+'.tx')
max_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')+mc.getAttr(wrist_ik_jnt+'.tx')
lo_abs_init_length = abs(mc.getAttr(lo_arm_ik_jnt+'.tx'))
wrist_abs_length = abs(mc.getAttr(wrist_ik_jnt+'.tx'))
# Get parents for ik handle and root of the parm
arm_root_grp = utils.get_parent(up_arm_ik_jnt)
# Create distance nodes between base, end, and pv ctrl to get the length of side of the triangle
root_to_end_dist = utils.create_distance_reader(arm_root_grp, ik_last_node)
root_to_pv_dist = utils.create_distance_reader(arm_root_grp, pv_ctrl)
pv_to_end_dist = utils.create_distance_reader(pv_ctrl, ik_last_node)
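# root_to_end drives the auto stretch factor; root_to_pv and pv_to_end supply the two segment lengths when the elbow is pinned to the pole vector ctrl.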
# easy stuff first - create fk stretch nodes
lo_arm_fk_mdl = mc.createNode('multDoubleLinear')
wrist_fk_mdl = mc.createNode('multDoubleLinear')
mc.setAttr(lo_arm_fk_mdl+'.input1', mc.getAttr(lo_arm_ik_jnt+'.tx'))
mc.setAttr(wrist_fk_mdl+'.input1', mc.getAttr(wrist_ik_jnt+'.tx'))
mc.connectAttr(up_arm_fk_ctrl+'.stretch', lo_arm_fk_mdl+'.input2')
mc.connectAttr(lo_arm_fk_ctrl+'.stretch', wrist_fk_mdl+'.input2')
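# Push each child FK control's ZERO group out by the scaled bone length so the controls follow the stretched joints.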
utils.connect_abs(lo_arm_fk_mdl+'.output', lo_arm_fk_ctrl+'_ZERO.tx')
if wrist_fk_ctrl and mc.objExists(wrist_fk_ctrl):
utils.connect_abs(wrist_fk_mdl+'.output', wrist_fk_ctrl+'_ZERO.tx')
# These are the final FK stretch outputs to connect to the joints
fk_stretch_final_output = [lo_arm_fk_mdl+'.output', wrist_fk_mdl+'.output']
# Now create the nodes for the elbow pin
lo_arm_pin_mdl = mc.createNode('multDoubleLinear')
wrist_pin_mdl = mc.createNode('multDoubleLinear')
mc.setAttr(lo_arm_pin_mdl+'.input1', 1)
mc.setAttr(wrist_pin_mdl+'.input1', 1)
if lo_init_length < 0.0:
mc.setAttr(lo_arm_pin_mdl+'.input1', -1)
if wrist_init_length < 0.0:
mc.setAttr(wrist_pin_mdl+'.input1', -1)
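# The distance readers always return positive values, so the multipliers carry the sign of the original translate (handles chains built down a negative axis).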
mc.connectAttr(root_to_pv_dist+'.localDistance', lo_arm_pin_mdl+'.input2')
mc.connectAttr(pv_to_end_dist+'.localDistance', wrist_pin_mdl+'.input2')
# These are the final elbow pin stretch outputs to connect to the joints
pin_final_output = [lo_arm_pin_mdl+'.output', wrist_pin_mdl+'.output']
# create shift nodes
mc.addAttr(lo_arm_ik_jnt, ln='shiftLength', k=1)
mc.addAttr(wrist_ik_jnt, ln='shiftLength', k=1)
tt = 'linear'
mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=lo_init_length, itt=tt, ott=tt)
mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=0, itt=tt, ott=tt)
mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=max_init_length, itt=tt, ott=tt)
mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=wrist_init_length, itt=tt, ott=tt)
mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=max_init_length, itt=tt, ott=tt)
mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=0, itt=tt, ott=tt)
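# The shift driven keys slide the elbow along the chain while keeping the total length constant: at +1 all of the length moves onto the wrist joint, at -1 all of it moves onto the lower arm joint.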
shift_final_output = [ lo_arm_ik_jnt+'.shiftLength', wrist_ik_jnt+'.shiftLength']
# Create the individual IK stretch nodes
lo_arm_ik_scale_mdl = mc.createNode('multDoubleLinear')
wrist_ik_scale_mdl = mc.createNode('multDoubleLinear')
mc.connectAttr(shift_final_output[0], lo_arm_ik_scale_mdl+'.input1')
mc.connectAttr(shift_final_output[1], wrist_ik_scale_mdl+'.input1')
mc.connectAttr(ik_ctrl+'.upStretch', lo_arm_ik_scale_mdl+'.input2')
mc.connectAttr(ik_ctrl+'.loStretch', wrist_ik_scale_mdl+'.input2')
# This is the final output for scale and shift
ik_stretch_final_output = [lo_arm_ik_scale_mdl+'.output', wrist_ik_scale_mdl+'.output']
# Now create the IK auto stretch nodes
lo_auto_stretch_mdl = mc.createNode('multDoubleLinear')
wrist_auto_stretch_mdl = mc.createNode('multDoubleLinear')
auto_stretch_clamp = mc.createNode('clamp')
mc.setAttr(auto_stretch_clamp+'.minR', 1)
mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)
mc.connectAttr(ik_stretch_final_output[0], lo_auto_stretch_mdl+'.input1', f=1)
mc.connectAttr(ik_stretch_final_output[1], wrist_auto_stretch_mdl+'.input1', f=1)
mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')
mc.connectAttr(auto_stretch_clamp+'.outputR', lo_auto_stretch_mdl+'.input2', f=1)
mc.connectAttr(auto_stretch_clamp+'.outputR', wrist_auto_stretch_mdl+'.input2', f=1)
adl = mc.createNode('addDoubleLinear')
mc.connectAttr(lo_arm_ik_scale_mdl+'.output', adl+'.input1')
mc.connectAttr(wrist_ik_scale_mdl+'.output', adl+'.input2')
utils.connect_abs(adl+'.output', root_to_end_dist+'.jointChainLength')
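# Clamping the stretch factor at 1 means auto stretch can only lengthen the chain, never compress it; the summed user-scaled segment lengths are fed back as the chain length the distance reader compares against.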
# Handle the soft IK constraint override on the ik handle
pc = mc.pointConstraint(ik_last_node, ik_handle)[0]
if mc.objExists(up_arm_ik_jnt+'.softIkChainLength'):
# compensate: feed the new chain length into the soft IK chain length
utils.connect_abs(adl+'.output', up_arm_ik_jnt+'.softIkChainLength')
# blend off the soft IK constraint if we are in auto stretch or pin mode
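# ((1 - pin) * (1 - autoStretch) drives the constraint weights, so the soft IK behaviour is blended off whenever pin or auto stretch is active)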
mdl = mc.createNode('multDoubleLinear')
utils.connect_reverse(ik_ctrl+'.'+pin_attr_name, mdl+'.input1')
utils.connect_reverse(ik_ctrl+'.autoStretch', mdl+'.input2')
mc.connectAttr(mdl+'.output', pc+'.w0')
utils.connect_reverse(pc+'.w0', pc+'.w1')
ik_auto_stretch_final_output = [lo_auto_stretch_mdl+'.output', wrist_auto_stretch_mdl+'.output']
# now create all my blends
# first blend between FK and an empty IK input
# (this IK input will take another blend node for blending all the IK options)
fk_to_ik_blend = mc.createNode('blendColors')
mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.blender')
mc.connectAttr(fk_stretch_final_output[0], fk_to_ik_blend+'.color2R')
mc.connectAttr(fk_stretch_final_output[1], fk_to_ik_blend+'.color2G')
# now create a blender between auto stretch and the rest of the IK options
auto_ik_blend = mc.createNode('blendColors')
mc.connectAttr(ik_ctrl+'.autoStretch', auto_ik_blend+'.blender')
mc.connectAttr(ik_auto_stretch_final_output[0], auto_ik_blend+'.color1R')
mc.connectAttr(ik_auto_stretch_final_output[1], auto_ik_blend+'.color1G')
# Now connect it to the FK/IK blend
mc.connectAttr(auto_ik_blend+'.outputR', fk_to_ik_blend+'.color1R')
mc.connectAttr(auto_ik_blend+'.outputG', fk_to_ik_blend+'.color1G')
# now create a blender between pin elbow and the rest of the ik options
pin_ik_blend = mc.createNode('blendColors')
mc.connectAttr(ik_ctrl+'.'+pin_attr_name, pin_ik_blend+'.blender')
mc.connectAttr(pin_final_output[0], pin_ik_blend+'.color1R')
mc.connectAttr(pin_final_output[1], pin_ik_blend+'.color1G')
# Now connect it to the auto stretch blend
mc.connectAttr(pin_ik_blend+'.outputR', auto_ik_blend+'.color2R')
mc.connectAttr(pin_ik_blend+'.outputG', auto_ik_blend+'.color2G')
# now connect the shift and scale
mc.connectAttr(ik_stretch_final_output[0], pin_ik_blend+'.color2R')
mc.connectAttr(ik_stretch_final_output[1], pin_ik_blend+'.color2G')
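# Per channel the blend order is: shift/scale IK lengths -> elbow pin -> auto stretch -> FK/IK switch -> joint translateX.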
# now for the magic! Connect the blend network to the joints
mc.connectAttr(fk_to_ik_blend+'.outputR', lo_arm_ik_jnt+'.tx')
mc.connectAttr(fk_to_ik_blend+'.outputG', wrist_ik_jnt+'.tx') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle):\n\n root_grp = utils.get_parent(jnts[0])\n stretch_jnts = jnts[1:]\n stretch_fk_ctrls = fk_ctrls[1:]\n\n # create attrs\n attrs = ['upStretch','loStretch']\n for i in reversed(range(len(stretch_jnts)-2)):\n ltr = ''\n if i > 0:\n ltr = utils.letters[i]\n\n attrs.insert(1, 'midStretch'+ltr)\n\n if not mc.objExists(ik_ctrl+'.autoStretch'):\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n\n for i in range(len(stretch_jnts)):\n if not mc.objExists(ik_ctrl+'.'+attrs[i]):\n mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1)\n\n for fk_ctrl in fk_ctrls[:-1]:\n if not mc.objExists(fk_ctrl+'.stretch'):\n mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts]\n abs_init_lengths = [abs(v) for v in init_lengths]\n\n total_init_length = 0\n for v in init_lengths:\n total_init_length += v\n\n abs_total_init_length = abs(total_init_length)\n\n # Create dist reader\n root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node)\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.addAttr(ik_ctrl, ln='stretchFactor', k=0)\n mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor')\n\n pma = mc.createNode('plusMinusAverage')\n utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(jnts[0]+'.softIkChainLength'):\n\n # compensate chain length - feed in new chain length for soft ik chain length\n utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto stretch\n mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1')\n utils.connect_reverse(pc+'.w1', pc+'.w0')\n\n # easy stuff first - create fk stretch nodes\n fk_to_ik_blends = [] # This is the final output for IK stretch\n\n for i, jnt in enumerate(stretch_jnts):\n\n # easy stuff first - create fk stretch nodes\n fk_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx'))\n mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2')\n utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx')\n\n # Create user secifed IK stretch\n user_ik_scale_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i])\n mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2')\n\n # Now create the IK auto stretch nodes\n auto_stretch_mdl = mc.createNode('multDoubleLinear')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i))\n\n fk_to_ik_blend = mc.createNode('blendTwoAttr')\n auto_stretch_blend = mc.createNode('blendTwoAttr')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender')\n mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]')\n mc.connectAttr(auto_stretch_blend+'.output', fk_to_ik_blend+'.input[1]')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender')\n mc.connectAttr(user_ik_scale_mdl+'.output', 
auto_stretch_blend+'.input[0]')\n mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]')\n\n fk_to_ik_blends.append(fk_to_ik_blend+'.output')\n\n for i, jnt in enumerate(stretch_jnts):\n mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')",
"def ar_addStretchSquash():\n setupName = 'Nose'\n sel = cmds.ls(sl=True)\n chain = cmds.ls(sel[0], dag=True, typ='joint')\n IKSpine = cmds.ikHandle(sj=chain[0], ee=chain[len(chain) - 1], sol='ikSplineSolver')\n # rename\n cmds.rename(IKSpine[0], 'IKSplineHandle_' + setupName)\n cmds.rename(IKSpine[1], 'IKSplineEff_' + setupName)\n cmds.rename(IKSpine[2], 'IKSplineCurve_' + setupName)\n # create new joints.\n cmds.select(cl=True)\n bindStartJt = cmds.joint(n='JtCrvBind01')\n cmds.select(cl=True)\n bindEndJt = cmds.joint(n='JtCrvBind02')\n cmds.delete(cmds.parentConstraint(chain[0], bindStartJt))\n cmds.delete(cmds.parentConstraint(chain[len(chain) - 1], bindEndJt))\n\n cmds.skinCluster(bindStartJt, bindEndJt, 'IKSplineCurve_' + setupName, bm=0, sm=0, nw=1, wd=0, mi=2)\n ctlStart = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '01_CTRL', ch=False)\n extraGrp = cmds.createNode('transform', n='Toony' + setupName + '01ExtraGrp')\n offGrp = cmds.createNode('transform', n='Toony' + setupName + '01OffsetGrp')\n cmds.parent(ctlStart[0], extraGrp)\n cmds.parent(extraGrp, offGrp)\n cmds.delete(cmds.parentConstraint(bindStartJt, offGrp))\n # endJOint\n ctlEnd = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '02_CTRL', ch=False)\n extraGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02ExtraGrp')\n offGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02OffsetGrp')\n cmds.parent(ctlEnd[0], extraGrpEnd)\n cmds.parent(extraGrpEnd, offGrpEnd)\n cmds.delete(cmds.parentConstraint(bindEndJt, offGrpEnd))\n # parent constraint wiht bind joints.\n cmds.parentConstraint(ctlStart[0], bindStartJt)\n cmds.parentConstraint(ctlEnd[0], bindEndJt)\n # Create connection with node basis.\n crvInfo = cmds.createNode('curveInfo', n='curveInfo_Toony' + setupName)\n shpCrv = cmds.listRelatives('IKSplineCurve_' + setupName, s=True)\n cmds.connectAttr(shpCrv[0] + '.worldSpace[0]', crvInfo + '.inputCurve', f=True)\n mdnForSX = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleX')\n mdnForPW = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_Power')\n mdnForYZ = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleYZ')\n cmds.setAttr(mdnForSX + '.operation', 2)\n cmds.setAttr(mdnForPW + '.operation', 3)\n cmds.setAttr(mdnForYZ + '.operation', 2)\n # connections.\n cmds.connectAttr(crvInfo + '.arcLength', mdnForSX + '.input1X', f=True)\n cmds.setAttr(mdnForSX + '.input2X', cmds.getAttr(mdnForSX + '.input1X'))\n scaledJoint = chain[:-1]\n for each in scaledJoint:\n cmds.connectAttr(mdnForSX + '.outputX', each + '.sx', f=True)\n # power connections.\n cmds.connectAttr(mdnForSX + '.outputX', mdnForPW + '.input1X', f=True)\n cmds.setAttr(mdnForPW + '.input2X', 0.5)\n cmds.connectAttr(mdnForPW + '.outputX', mdnForYZ + '.input2X', f=True)\n cmds.setAttr(mdnForYZ + '.input1X', 1)\n for each in scaledJoint:\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sy')\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sz')\n # TODO: need to full proof this function.",
"def gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n obj = qad_utils.whatGeomIs(0, geom)\n if (type(obj) != list and type(obj) != tuple):\n objType = obj.whatIs()\n if objType == \"CIRCLE\": # se é cerchio\n newCircle = gripStretchCircle(obj, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newCircle is not None:\n return QgsGeometry.fromPolyline(newCircle.asPolyline(tolerance2ApproxCurve))\n elif objType == \"ARC\": # se é arco\n newArc = gripStretchArc(obj, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newArc is not None:\n return QgsGeometry.fromPolyline(newArc.asPolyline(tolerance2ApproxCurve))\n return None\n \n linearObjectListToStretch = qad_utils.QadLinearObjectList()\n linearObjectListToStretch.fromPolyline(geom.asPolyline())\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n \n pts = linearObjectListToStretch.asPolyline(tolerance2ApproxCurve)\n stretchedGeom = QgsGeometry.fromPolyline(pts) \n \n return stretchedGeom",
"def route_bitlines(self):\n # adds the BL on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_bl).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"bl\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)\n\n # adds the BR on metal 2\n offset = vector(self.bitcell.get_pin(self.bitcell_br).cx(),0) - vector(0.5 * self.m2_width,0)\n self.add_layout_pin(text=\"br\",\n layer=\"metal2\",\n offset=offset,\n width=drc['minwidth_metal2'],\n height=self.height)",
"def __init__(self, workplane, measures):\n\n cq.Workplane.bracket = utilities.bracket\n cq.Workplane.transformedWorkplane = utilities.transformedWorkplane\n cq.Workplane.bolt = utilities.bolt\n cq.Workplane.cutEachAdaptive = utilities.cutEachAdaptive\n\n self.model = workplane\n self.debug = False\n self.measures = measures\n m = self.measures\n\n # The bracket lengths are measured at the outside, but the construction actually uses a \n # central cuboid block with two attached brackets. Adapting the measures accordingly.\n m.center_block = Measures(\n # Naming is as seen from the horizontal leg.\n width = max(m.horizontal_leg.width, m.vertical_leg.width),\n depth = m.vertical_leg.height,\n height = m.horizontal_leg.height\n )\n m.horizontal_leg.depth -= m.center_block.depth\n m.vertical_leg.depth -= m.center_block.height\n\n # Create hole specs which combine the other hole measures in the format expected by bolthole().\n m.horizontal_leg.hole_specs = [\n {\n \"diameter\": m.horizontal_leg.hole_diameters[i] if isinstance(m.horizontal_leg.hole_diameters, list) else m.horizontal_leg.hole_diameters,\n \"clamp_length\": m.horizontal_leg.clamp_lengths[i] if isinstance(m.horizontal_leg.clamp_lengths, list) else m.horizontal_leg.clamp_lengths, \n \"nuthole_size\": m.horizontal_leg.nuthole_sizes[i] if isinstance(m.horizontal_leg.nuthole_sizes, list) else m.horizontal_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.vertical_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.horizontal_leg.hole_count)\n ]\n m.vertical_leg.hole_specs = [\n {\n \"diameter\": m.vertical_leg.hole_diameters[i] if isinstance(m.vertical_leg.hole_diameters, list) else m.vertical_leg.hole_diameters,\n \"clamp_length\": m.vertical_leg.clamp_lengths[i] if isinstance(m.vertical_leg.clamp_lengths, list) else m.vertical_leg.clamp_lengths, \n \"nuthole_size\": m.vertical_leg.nuthole_sizes[i] if isinstance(m.vertical_leg.nuthole_sizes, list) else m.vertical_leg.nuthole_sizes, \n \"nuthole_depth\": 1.1 * m.horizontal_leg.depth # Just choose something large enough for cutting. \n }\n for i in range(m.vertical_leg.hole_count)\n ]\n\n # TODO: Initialize missing measures with defaults.\n\n self.build()",
"def blockScale_bake(self,sizeMethod = 'axisSize',force=False,):\n try:\n _str_func = 'bake_blockScale'\n log.debug(cgmGEN.logString_start(_str_func))\n str_self = self.mNode\n \n if self.p_parent:\n return log.error(cgmGEN.logString_msg(_str_func, \"Can't bake parented blocks, please unparent\"))\n \n _blockScale = self.blockScale\n \n if MATH.is_float_equivalent(_blockScale,1):\n log.debug(cgmGEN.logString_msg(_str_func, 'Already 1.0'))\n return True\n \n if self.hasAttr('baseSize'):\n _baseSize = True\n for a in 'xyz':\n if ATTR.is_connected(str_self,'baseSize'+a.capitalize()):\n _baseSize=False\n break\n if _baseSize:\n log.info(cgmGEN.logString_msg(_str_func, 'baseSize buffer. Not connected'))\n self.baseSize = baseSize_get(self)\n \n _factor = 1.0/_blockScale\n \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True)\n md_dat = {}\n \n log.debug(cgmGEN.logString_sub(_str_func, 'Gather Dat'))\n #First Loop gateher\n for i,mCtrl in enumerate(ml_ctrls):\n _str = mCtrl.p_nameShort\n _d = {'str':_str}\n \n if not ATTR.is_locked(_str,'translate'):\n _d['pos']=mCtrl.p_position\n \n _d['lossyScale'] = TRANS.scaleLossy_get(_str)\n _d['worldScale'] = mc.xform(_str, q=True, scale = True, worldSpace = True, absolute = True)\n _d['factorScale'] = [v*_factor for v in _d['worldScale']]\n \n _d['noParent'] = False\n if ATTR.is_locked(_str,'translate'):\n _d['noParent'] = True\n \n \n for a in ['sx','sy','sz']:\n if not ATTR.is_locked(_str,a):\n v = ATTR.get(_str,a)\n #if not MATH.is_float_equivalent(1.0,v):\n _d[a] = v * _blockScale\n if not _d.get('axisSize'):\n _d['axisSize'] = DIST.get_axisSize(_str)\n if not _d.get('bbSize'):\n _d['bbSize'] = TRANS.bbSize_get(_str)\n \n md_dat[i] = _d\n \n \n #pprint.pprint(md_dat)\n #return\n log.debug(cgmGEN.logString_msg(_str_func, 'Setting intiial'))\n ATTR.set(self.mNode,'blockScale',1.0)\n \"\"\"\n blockDat_save(self)\n blockDat_load(self,redefine=True) \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True) \n \"\"\"\n \n for ii in range(3):#3 loop to account for parentage\n log.debug(cgmGEN.logString_sub(_str_func, 'Push: {0}'.format(ii)))\n\n for i,mCtrl in enumerate(ml_ctrls):\n _d = md_dat[i]\n log.debug(cgmGEN.logString_msg(_str_func, \"{0} | {1}\".format(_d['str'],_d)))\n _pos = _d.get('pos')\n _noParent = _d['noParent']\n \n if _pos:mCtrl.p_position = _pos\n \n \n _worldScale = _d.get('worldScale')\n if _worldScale and _noParent is not True:\n mParent = mCtrl.p_parent\n if mParent:\n mCtrl.p_parent = False\n \n #mc.xform(mCtrl.mNode, scale = _worldScale, objectSpace = True, absolute = True)\n mc.xform(mCtrl.mNode, scale = _worldScale, worldSpace = True, absolute = True)\n \n if mParent:mCtrl.p_parent = mParent\n else:\n if not ATTR.is_locked(mCtrl.mNode,'scale'):\n \"\"\"\n _worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\"\"\"\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n #reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n \"\"\"\n if ii == 0:\n 
_worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\"\"\" \n #Fix the root shape\n #if not ATTR.is_connected(self.mNode,'baseSize'):\n #log.info(cgmGEN.logString_sub(_str_func, 'Base size buffer'))\n \n rootShape_update(self)\n #pprint.pprint(vars())\n return True \n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)",
"def blockScale_bake(self,sizeMethod = 'axisSize',force=False,):\n try:\n _str_func = 'bake_blockScale'\n log.debug(cgmGEN.logString_start(_str_func))\n str_self = self.mNode\n \n if self.p_parent:\n return log.error(cgmGEN.logString_msg(_str_func, \"Can't bake parented blocks, please unparent\"))\n \n _blockScale = self.blockScale\n \n if MATH.is_float_equivalent(_blockScale,1):\n log.debug(cgmGEN.logString_msg(_str_func, 'Already 1.0'))\n return True\n \n if self.hasAttr('baseSize'):\n _baseSize = True\n for a in 'xyz':\n if ATTR.is_connected(str_self,'baseSize'+a.capitalize()):\n _baseSize=False\n break\n if _baseSize:\n log.info(cgmGEN.logString_msg(_str_func, 'baseSize buffer. Not connected'))\n self.baseSize = baseSize_get(self)\n \n _factor = 1.0/_blockScale\n \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True)\n md_dat = {}\n \n log.debug(cgmGEN.logString_sub(_str_func, 'Gather Dat'))\n #First Loop gateher\n for i,mCtrl in enumerate(ml_ctrls):\n _str = mCtrl.p_nameShort\n _d = {'str':_str}\n \n if not ATTR.is_locked(_str,'translate'):\n _d['pos']=mCtrl.p_position\n \n _d['lossyScale'] = TRANS.scaleLossy_get(_str)\n _d['worldScale'] = mc.xform(_str, q=True, scale = True, worldSpace = True, absolute = True)\n _d['factorScale'] = [v*_factor for v in _d['worldScale']]\n \n _d['noParent'] = False\n if ATTR.is_locked(_str,'translate'):\n _d['noParent'] = True\n \n \n for a in ['sx','sy','sz']:\n if not ATTR.is_locked(_str,a):\n v = ATTR.get(_str,a)\n #if not MATH.is_float_equivalent(1.0,v):\n _d[a] = v * _blockScale\n if not _d.get('axisSize'):\n _d['axisSize'] = DIST.get_axisSize(_str)\n if not _d.get('bbSize'):\n _d['bbSize'] = TRANS.bbSize_get(_str)\n \n md_dat[i] = _d\n \n \n #pprint.pprint(md_dat)\n #return\n log.debug(cgmGEN.logString_msg(_str_func, 'Setting intiial'))\n ATTR.set(self.mNode,'blockScale',1.0)\n \"\"\"\n blockDat_save(self)\n blockDat_load(self,redefine=True) \n ml_ctrls = controls_get(self, define=True, form=True, prerig=True) \n \"\"\"\n \n for ii in range(3):#3 loop to account for parentage\n log.debug(cgmGEN.logString_sub(_str_func, 'Push: {0}'.format(ii)))\n\n for i,mCtrl in enumerate(ml_ctrls):\n _d = md_dat[i]\n log.debug(cgmGEN.logString_msg(_str_func, \"{0} | {1}\".format(_d['str'],_d)))\n _pos = _d.get('pos')\n _noParent = _d['noParent']\n \n if _pos:mCtrl.p_position = _pos\n \n \n _worldScale = _d.get('worldScale')\n if _worldScale and _noParent is not True:\n mParent = mCtrl.p_parent\n if mParent:\n mCtrl.p_parent = False\n \n #mc.xform(mCtrl.mNode, scale = _worldScale, objectSpace = True, absolute = True)\n mc.xform(mCtrl.mNode, scale = _worldScale, worldSpace = True, absolute = True)\n \n if mParent:mCtrl.p_parent = mParent\n else:\n if not ATTR.is_locked(mCtrl.mNode,'scale'):\n \"\"\"\n _worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\"\"\"\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n \"\"\"\n if ii == 0:\n 
_worldScale = _d.get('factorScale')\n if _worldScale:\n mc.xform(_str, scale = _worldScale, worldSpace = True, )#absolute = True\n \n for a in ['sx','sy','sz']:\n if _d.get(a):\n ATTR.set(_d['str'],a,_d[a])\n \n if sizeMethod == 'axisSize':\n if _d.get('axisSize'):\n try:\n DIST.scale_to_axisSize(_d['str'],_d['axisSize'])\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\n elif sizeMethod in ['bb','bbSize']:\n if _d.get('bbSize'):\n try:\n reload(TRANS)\n TRANS.scale_to_boundingBox(_d['str'],_d['bbSize'],freeze=False)\n except Exception,err:\n log.warning(cgmGEN.logString_msg(_str_func, \"{0} | failed to axisSize {1}\".format(_d['str'],err)))\"\"\" \n #Fix the root shape\n #if not ATTR.is_connected(self.mNode,'baseSize'):\n #log.info(cgmGEN.logString_sub(_str_func, 'Base size buffer'))\n \n rootShape_update(self)\n #pprint.pprint(vars())\n return True \n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)",
"def gripStretchQgsGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n wkbType = geom.wkbType()\n if wkbType == QGis.WKBPoint or wkbType == QGis.WKBPoint25D:\n pt = stretchPoint(geom.asPoint(), ptListToStretch, offSetX, offSetY)\n if pt is not None:\n return QgsGeometry.fromPoint(pt)\n \n if wkbType == QGis.WKBMultiPoint:\n stretchedGeom = QgsGeometry(geom)\n points = stretchedGeom.asMultiPoint() # vettore di punti\n atSubGeom = 0\n for pt in points:\n subGeom = QgsGeometry.fromPoint(pt)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n\n if wkbType == QGis.WKBLineString:\n return gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n \n if wkbType == QGis.WKBMultiLineString:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asMultiPolyline() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBPolygon:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asPolygon() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBMultiPolygon:\n stretchedGeom = QgsGeometry(geom)\n polygons = geom.asMultiPolygon() # vettore di poligoni\n atSubGeom = 0\n for polygon in polygons:\n subGeom = QgsGeometry.fromPolygon(polygon)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n return None",
"def gripStretchQgsLinearObjectList(linearObjectList, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n linearObjectListToStretch = qad_utils.QadLinearObjectList(linearObjectList)\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n\n return linearObjectListToStretch",
"def place_rebar_long_flex(as_req, width, cover, link_d, d, spacing):\r\n \"\"\"as_req - required area of steel\"\"\"\r\n \"\"\"width - beam width; cover - beam side cover; link_d; shear link diameter in millimeters\"\"\"\r\n \"\"\"d - diameter of bar; spacing - minimum spacing of bars\"\"\"\r\n\r\n layer = [2] # initialize vector that stores number of bottom bars (minimum 2)\r\n as_ = layer[-1] * d ** 2 * np.pi / 4 # determine current area of reinforcement\r\n\r\n while as_ < as_req: # while amount of reinforcement of the beam is less than required\r\n layer[-1] = layer[-1] + 1 # add one bar\r\n # evaluate distance between bars\r\n d_axis_b = (int(width) - 2 * cover - 2 * link_d - d) / (int(layer[-1]) - 1) # distance between bar axis\r\n d_bars_b = d_axis_b - d # distance between bars\r\n\r\n if d_bars_b < spacing: # in case bars are spaced less than spacing variable\r\n layer[-1] = layer[-1] - 1 # go back to previous number of bars\r\n layer.append(2) # add another layer of bars with minimum of 2 bars\r\n\r\n as_ = sum(layer) * d ** 2 * np.pi / 4 # update current area of reinforcement\r\n\r\n rebar = []\r\n\r\n for i in range(len(layer)):\r\n layers_list = [layer[i], d]\r\n rebar.append(layers_list)\r\n\r\n return rebar",
"def build_rig(self):\n\n # create rig part top nodes\n self.create_part_master()\n\n # Get all the relevant part info\n prefix = self.prefix\n options = self.options\n anim_ctrls = self.anim_ctrls\n bind_jnts = self.bind_joints\n hooks = self.hooks\n ctrl_grps = self.ctrl_grps\n jnt_grps = self.jnt_grps\n\n mirror = self.mirror_value\n\n parent = options.get('parent')\n squash_stretch = options.get('squashStretch')\n aimDownBone = options.get('aimDownBone')\n single_joint = options.get('singleJoint')\n number_joints = options.get('numberJoints')\n pickWalk_parent = options.get('pickWalkParent')\n\n # Create ctrls\n zeros, ctrls, offsets, last_nodes = [], [], [], []\n\n for i, ctrl_name in enumerate(anim_ctrls):\n zero, ctrl, offCtrls, last_node = self.anim_ctrl(ctrl_name)\n zeros.append(zero)\n ctrls.append(ctrl)\n offsets.append(offCtrls)\n last_nodes.append(last_node)\n\n #Setup pickwaliking attributes for the fingers\n i = 0\n ctrls.reverse()\n for ctrl in ctrls:\n\n if i+1 < len(ctrls):\n\n pickWalk.attribute_tag(ctrls[i],ctrls[i+1])\n else:\n pickWalk.attribute_tag(ctrls[i],pickWalk_parent)\n break\n\n i+=1\n ctrls.reverse()\n\n if len(ctrls) > 1:\n for i in range(1, len(ctrls), 1):\n mc.parent(zeros[i], last_nodes[i-1])\n\n # constraint jnts\n if len(bind_jnts) > 2:\n\n # point and aim/orient contraint all joints down the chain based on the\n for i in range(len(last_nodes)-1):\n mc.pointConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_pc')\n if not squash_stretch:\n mc.scaleConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_sc')\n\n if i < len(last_nodes)-1:\n print aimDownBone\n if aimDownBone:\n mc.aimConstraint(last_nodes[i+1],\n bind_jnts[i],\n aim=[mirror,0,0],\n u=[0,1,0],\n wu=[0,1,0],\n wut='objectRotation',\n wuo=last_nodes[i],\n mo=1, n=bind_jnts[i]+'_ac')\n if aimDownBone == False:\n mc.orientConstraint(last_nodes[i],bind_jnts[i],n=bind_jnts[i]+'_oc')\n\n #parent constrain the last joint ot the last ctrl\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_prc')\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n\n # if not squash_stretch:\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_sc')\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n elif single_joint or number_joints == 1:\n mc.parentConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_prc')\n mc.scaleConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_sc')\n\n else:\n if squash_stretch:\n spline.preserve_volume(ctrls, bind_jnts[:-1], ctrls[0], attrs=['sy','sz'])\n\n mc.parentConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n mc.scaleConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n mc.parent(zeros[0], ctrl_grps[0])\n mc.parent(bind_jnts, jnt_grps[0])\n\n if not single_joint and number_joints == 1:\n mc.parent(bind_jnts[-1], bind_jnts[0])\n\n #utils.create_cfx_curves(self.bind_joints, self.prefix+'_'+self.part_type)\n\n if len(ctrls) > 1:\n spaces.tag(ctrls, arg='partParent:'+self.options.get('parent'))\n else:\n spaces.tag(ctrls)\n\n self.finalize_part()",
"def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5",
"def prepare_blockages(self, pin_name):\n debug.info(3,\"Preparing blockages.\")\n \n # Start fresh. Not the best for run-time, but simpler.\n self.clear_blockages()\n # This adds the initial blockges of the design\n #print(\"BLOCKING:\",self.blocked_grids)\n self.set_blockages(self.blocked_grids,True)\n\n # Block all of the supply rails (some will be unblocked if they're a target)\n self.set_supply_rail_blocked(True)\n \n # Block all of the pin components (some will be unblocked if they're a source/target)\n # Also block the previous routes\n for name in self.pin_groups:\n blockage_grids = {y for x in self.pin_groups[name] for y in x.grids}\n self.set_blockages(blockage_grids,True)\n blockage_grids = {y for x in self.pin_groups[name] for y in x.blockages}\n self.set_blockages(blockage_grids,True)\n\n # FIXME: These duplicate a bit of work\n # These are the paths that have already been routed.\n self.set_blockages(self.path_blockages)\n\n # Don't mark the other components as targets since we want to route\n # directly to a rail, but unblock all the source components so we can\n # route over them\n blockage_grids = {y for x in self.pin_groups[pin_name] for y in x.grids}\n self.set_blockages(blockage_grids,False)",
"def setup_layout_constants(self):\n # determines the spacing between the edge and nmos (rail to active\n # metal or poly_to_poly spacing)\n half_gate_to_gate = 0.5 * (drc[\"poly_to_poly\"] - drc[\"minwidth_metal1\"])\n edge_to_nmos = max(drc[\"metal1_to_metal1\"] - self.nmos.active_contact_positions[0].y,\n half_gate_to_gate - self.nmos.poly_positions[0].y)\n\n # determine the position of the first transistor from the left\n self.nmos_position1 = vector(0,\n 0.5 * drc[\"minwidth_metal1\"] + edge_to_nmos)\n offset = self.nmos_position1 + vector(0,self.nmos.height)\n\n x = vector(self.nmos.active_width - self.nmos.active_contact.width, 0)\n self.nmos_position2 = x + self.nmos_position1.scale(0,1)\n\n # determines the spacing between the edge and pmos\n edge_to_pmos = max(drc[\"metal1_to_metal1\"] - self.pmos.active_contact_positions[0].y,\n half_gate_to_gate - self.pmos.poly_positions[0].y)\n self.pmos_position1 = vector(0,\n self.height - 0.5 * drc[\"minwidth_metal1\"]\n - edge_to_pmos - self.pmos.height)\n self.pmos_position2 = self.pmos_position1 + vector(self.pmos.width,0)\n\n self.well_width = max(self.pmos_position2.x + self.pmos.active_position.x\n + self.pmos.active_width\n + drc[\"active_to_body_active\"] + self.nwell_contact.width \n + drc[\"well_enclosure_active\"],\n self.nmos_position2.x + self.nmos.active_position.x \n + self.nmos.active_width \n + drc[\"active_to_body_active\"] + drc[\"well_enclosure_active\"])\n self.width = self.well_width",
"def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0",
"def prepare(info_dict):\n\n logger.info(\"\\n-=# Chain optimization cycle 0 #=- \\n\")\n params, M, engine, result, _ = get_basic_info(info_dict)\n\n logger.info(\"Spring Force: %.2f kcal/mol/Ang^2 \\n\" % params.nebk)\n\n tmpdir = tempfile.mkdtemp()\n\n # Getting the initial chain.\n chain = ElasticBand(M, engine=engine, tmpdir=tmpdir, params=params, plain=params.plain)\n\n trust = params.trust\n chain.ComputeChain(result=result)\n chain.ComputeGuessHessian(blank=isinstance(engine, Blank))\n chain.PrintStatus()\n\n avgg_print, maxg_print = print_forces(chain, params.avgg, params.maxg)\n logger.info(\"-= Chain Properties =- \\n\")\n logger.info(\n \"@\\n%13s %13s %13s %13s %11s %13s %13s \\n\"\n % (\"GAvg(eV/Ang)\", \"GMax(eV/Ang)\", \"Length(Ang)\", \"DeltaE(kcal)\", \"RMSD(Ang)\", \"TrustRad(Ang)\", \"Step Quality\")\n )\n logger.info(\n \"@%13s %13s %13s \\n\"\n % (\n \" %s \" % avgg_print,\n \" %s \" % maxg_print,\n \"% 8.4f \" % sum(chain.calc_spacings()),\n )\n )\n\n GW = chain.get_global_grad(\"total\", \"working\")\n GP = chain.get_global_grad(\"total\", \"plain\")\n HW = chain.guess_hessian_working.copy()\n HP = chain.guess_hessian_plain.copy()\n dy, expect, expectG, ForceRebuild = chain.CalcInternalStep(trust, HW, HP)\n new_chain = chain.TakeStep(dy)\n respaced = new_chain.delete_insert(1.5)\n newcoords = chaintocoords(new_chain)\n attrs_new = check_attr(new_chain)\n attrs_prev = check_attr(chain)\n\n temp = {\"Ys\": [chain.get_internal_all().tolist()], \"GWs\": [GW.tolist()], \"GPs\": [GP.tolist()], \"attrs_new\": attrs_new,\n \"attrs_prev\": attrs_prev, \"trust\": trust, \"expect\": expect, \"expectG\": expectG.tolist(), \"respaced\": respaced,\n \"trustprint\": \"=\", \"frocerebuild\": False,\"lastforce\": 0, \"coord_ang_prev\": chaintocoords(chain, True),\n \"result_prev\": result, \"geometry\": []}\n info_dict.update(temp)\n return newcoords, info_dict",
"def determine_doubler_plate(self, connection_type, steel, left_beam, right_beam, bottom_column, top_column):\r\n if connection_type == 'top exterior':\r\n # Connection has one left beam and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+0)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical exterior':\r\n # Connection has one left beam and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = left_beam.section['d']\r\n tf = left_beam.section['tf']\r\n self.shear_force['Vc'] = (self.moment['Mf1']+0) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+0)/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'top interior':\r\n # Connection has two beams and one bottom column\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n # Actually left and right beams have the identical sizes\r\n db = (left_beam.section['d'] + right_beam.section['d'])/2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf'])/2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n elif connection_type == 'typical interior':\r\n # Connection has two beams and two columns\r\n h_bot = bottom_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n h_top = top_column.unbraced_length['x']*12.0 # Be cautious: convert the unit from ft to inch\r\n db = (left_beam.section['d'] + right_beam.section['d']) / 2\r\n tf = (left_beam.section['tf'] + right_beam.section['tf']) / 2\r\n self.shear_force['Vc'] = (self.moment['Mf1']+self.moment['Mf2']) / (h_bot/2+h_top/2)\r\n self.shear_force['Ru'] = (self.moment['Mf1']+self.moment['Mf2'])/(db-tf) - self.shear_force['Vc']\r\n else:\r\n sys.stderr.write('Error: wrong type of connection specified!\\nNo such keyword for connection exists!\\n')\r\n sys.exit(2)\r\n # Compute the shear strength of the panel zone\r\n phi = 1.0\r\n dc = bottom_column.section['d']\r\n tw = bottom_column.section['tw']\r\n bcf = bottom_column.section['bf']\r\n tcf = bottom_column.section['tf']\r\n db = left_beam.section['d']\r\n self.shear_force['Rn'] = 0.60 * steel.Fy * dc * tw * (1+(3*bcf*tcf**2)/(db*dc*tw))\r\n # Compute the doubler plate thickness\r\n if phi*self.shear_force['Rn'] >= self.shear_force['Ru']:\r\n # Panel zone shear strength is sufficient ==> no need for doubler plate\r\n self.doubler_plate_thickness = 0\r\n else:\r\n # Panel zone shear strength is not sufficient ==> need doubler plate\r\n required_tp = (self.shear_force['Ru'] - 0.60*steel.Fy*(3*bcf*tcf**2)/db) / (0.60*steel.Fy*dc)\r\n tp = 0.25 # Assumed doubler plate thickness\r\n while tp < required_tp:\r\n tp += 0.25 # Update the thickness at an increment of 0.25 until it reaches the requirement\r\n self.doubler_plate_thickness = tp",
"def _grow_secondary(self, amt):\n self._resize_secondary(amt)",
"def beam_align():\n\n # do nothing if there is a sample mounted to avoid collisions\n if smart_magnet.sample_detect.get() == 0:\n raise Exception(\"Sample mounted on gonio! Avoided collision\")\n\n # wait for attenuators to finish moving\n yield from bps.abs_set(mxatten, 0.002)\n yield from bps.sleep(5)\n\n # transition to BL and open shutter\n yield from bps.abs_set(gov_rbt, \"BL\", wait=True)\n yield from bps.mv(sht.r, 0)\n\n yield from bps.abs_set(rot_aligner.cam_hi.cam_mode, \"beam_align\")\n\n # which direction, x pos. pitch beam outboard (-), y pos. pitch beam up (+)\n scan_uid = yield from bp.count([rot_aligner.cam_hi], 1)\n centroid_x, centroid_y = (\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output1.name][1],\n db[scan_uid].table()[rot_aligner.cam_hi.cv1.outputs.output2.name][1],\n )\n\n if np.isclose(0, centroid_x) or np.isclose(0, centroid_y):\n raise Exception(\"No centroid detected!\")\n\n yield from bps.abs_set(kbt.hor.delta_px, (centroid_x - 320))\n yield from bps.abs_set(kbt.ver.delta_px, -(centroid_y - 256))\n\n def lin_reg(independent, dependent, goal, **kwargs) -> float:\n b = dependent\n A = np.matrix([[pos, 1] for pos in independent])\n p = (\n np.linalg.inv(A.transpose() * A)\n * A.transpose()\n * np.matrix(b.to_numpy()).transpose()\n )\n best = (goal - p[1]) / p[0]\n return best\n\n for axis, signal, center in (\n kbt.hor,\n rot_aligner.cam_hi.cv1.outputs.output1,\n 320,\n ), (kbt.ver, rot_aligner.cam_hi.cv1.outputs.output2, 256):\n # skip if we are within 1 um\n if abs(axis.delta_px.get()) > 2:\n scan_uid = yield from rel_scan_no_reset(\n [rot_aligner.cam_hi],\n axis,\n 0,\n 0.4 * -(axis.delta_px.get() / abs(axis.delta_px.get())),\n 10,\n )\n scan_df = db[scan_uid].table()\n best_voltage = lin_reg(\n scan_df[axis.readback.name],\n scan_df[signal.name],\n center,\n )\n yield from bps.mv(axis, best_voltage)\n yield from bps.sleep(1)\n\n # close shutters and reset attenuators for manual viewing\n yield from bps.mv(sht.r, 20)",
"def edbl():\n bpy.ops.transform.edge_slide(value=self.btr, mirror=False, correct_uv=False)\n bpy.ops.mesh.bevel(offset=self.bofs/2 , segments=self.bss+1 , vertex_only=False)\n bpy.ops.mesh.select_less()\n bpy.ops.transform.shrink_fatten(value=(self.bts * -1) if self.dms == 1 else self.bts, use_even_offset=self.bev)\n bpy.ops.mesh.remove_doubles(threshold=self.brd)\n if self.brx == True:\n try:\n bpy.ops.mesh.looptools_relax(input='selected', interpolation='linear', iterations='3', regular=False)\n except AttributeError:\n self.report({'ERROR'},\"I'm sorry the addon 'Looptools' is not active or not installed.\")\n if self.dsp == 1:\n bpy.ops.mesh.bevel(offset=0.1, segments=2, vertex_only=False)\n bpy.ops.mesh.select_less()\n bpy.ops.transform.shrink_fatten(value=0.2, use_even_offset=False, mirror=False, proportional='CONNECTED',\n proportional_edit_falloff='SMOOTH', proportional_size=0.0839017)",
"def sub_link_capacity(self, path, bw):\n \n # PART 1, TASK 3.4 sub bw to edges",
"def fix_bone_lengths(self, b_armature_data):\n for b_edit_bone in b_armature_data.edit_bones:\n #don't change root bones\n if b_edit_bone.parent:\n # take the desired length from the mean of all children's heads\n if b_edit_bone.children:\n childheads = mathutils.Vector()\n for b_child in b_edit_bone.children:\n childheads += b_child.head\n bone_length = (b_edit_bone.head - childheads/len(b_edit_bone.children)).length\n if bone_length < 0.01:\n bone_length = 0.25\n # end of a chain\n else:\n bone_length = b_edit_bone.parent.length\n b_edit_bone.length = bone_length",
"def make_shaped_repertoire(RNs):\n # get objective distribution\n bin_edges, obj_dist, volume = objective_distribution()\n # get an antigenic epitope sequence, and in case of nkey=1,2 check whether\n # it can populate all required bins, thus avoiding infinite loop below\n AgEpitope = get_AgEpitope(RNs)\n if cf.nkey == 1 or cf.nkey == 2:\n while 1:\n # get list of all possible binding partners and their energies\n all_partners = get_all_partners()\n all_energies = [E_best(partner, AgEpitope)\n for partner in all_partners]\n # check whether all bins are occupiable with these energies,\n # if not, get new epitope sequence\n indices = np.digitize(all_energies, bin_edges, right=True)\n ind_set = set(indices)\n ind_set.discard(0)\n # if all bins can be occupied, move on\n if ind_set == set(range(1, len(bin_edges))):\n break\n # else get a new epitope and check its validity\n else:\n AgEpitope = get_AgEpitope(RNs)\n # initialise empty list for counting how many seqs have been found per bin\n ist_dist = np.zeros(len(obj_dist))\n # seq_list for collecting identified sequences\n seq_list = []\n E_list = []\n # while ist_dist and obj_dist are not equal, get new sequences and position\n # them if they are useful\n # introduce a tolerance of how far bins are allowed to deviate from the\n # goal, as otherwise runtime explodes due to very long waiting times for\n # high binding energy codes in large nkey cases - allow an absolute\n # deviation of volume*tolerance % for each bin.\n abs_tol = volume * 0.005\n while np.sum(np.abs((ist_dist-obj_dist)) > abs_tol) > 0:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n # find index bin of this energy\n indx = np.digitize(Emax, bin_edges, right=True)\n # if the index is in the useful range and the bin is not yet full,\n # count the sequence and store it\n if indx in range(1, len(bin_edges)):\n if obj_dist[indx-1] - ist_dist[indx-1] > 0:\n ist_dist[indx-1] += 1\n seq_list.append(ab)\n E_list.append(Emax)\n\n return seq_list, E_list, AgEpitope",
"def __init__(\n self, model_path, n_substeps, gripper_extra_height, block_gripper,\n has_object, target_in_the_air, target_offset, obj_range, target_range,\n distance_threshold, initial_qpos, reward_type, goal_high_prob,\n min_goal_extra_height=0.0, max_goal_extra_height=0.45,\n min_dist_between_objs=0.1, same_color_radius=0.5,\n terminate_on_success=False\n ):\n self.gripper_extra_height = gripper_extra_height\n self.block_gripper = block_gripper\n self.has_object = has_object\n self.target_in_the_air = target_in_the_air\n self.target_offset = target_offset\n self.obj_range = obj_range\n self.target_range = target_range\n self.distance_threshold = distance_threshold\n self.reward_type = reward_type\n self.goal_high_prob = goal_high_prob\n self.min_goal_extra_height = min_goal_extra_height\n self.max_goal_extra_height = max_goal_extra_height\n self.min_dist_between_objs = min_dist_between_objs\n self.same_color_radius = same_color_radius\n\n few_shot_robot_env.FewShotRobotEnv.__init__(\n self, model_path=model_path, n_substeps=n_substeps, n_actions=4,\n initial_qpos=initial_qpos, terminate_on_success=terminate_on_success\n )",
"def oss_stacked(block, cut, laser):\r\n\tx0_1, x1_1, z0_1, taper_x_1, taper_y_1, layers_1, pyramid_angle_1 = oss_helper(block, cut, laser, cut[\"final_dimension_x\"]/2)\r\n\tx0_2, x1_2, z0_2, taper_x_2, taper_y_2, layers_2, pyramid_angle_2 = oss_helper(block, cut, laser, cut[\"final_dimension_y\"]/2)\r\n\tangle = math.radians(laser[\"kerf_angle\"]/2)\r\n\tgap = math.tan(pyramid_angle_1) * (cut[\"final_dimension_x\"]/2) + cut[\"gap_size\"]\r\n\tunit_length = gap + cut[\"base_height\"]\r\n\tmax_slices = math.floor(block[\"thickness\"]/unit_length)\r\n\ttaper_straight = math.tan(angle)*(laser[\"z_spacing\"])\r\n\r\n\tif cut[\"core\"] == \"yes\":\r\n\t\tcutlist = json.loads(vertical_core(block,cut,laser))\r\n\t\tcutlist.pop()\r\n\t\tcutlist.pop(0)\r\n\telse:\r\n\t\tcutlist = []\r\n\r\n\ta0 = -(90 + math.degrees(angle))\r\n\r\n\tz_shift = (cut[\"base_height\"] + gap) * math.sin(angle)\r\n\tx_shift = (cut[\"base_height\"] + gap) * math.cos(angle)\r\n\r\n\tx_delta = math.sin(angle) * block[\"origin_x\"]\r\n\ty_delta = math.sin(angle) * block[\"origin_y\"]\r\n\tz1_delta = math.cos(angle) * block[\"origin_x\"]\r\n\tz2_delta = math.cos(angle) * block[\"origin_y\"]\r\n\r\n\tcutlist.append([\"a_abs\",f\"{a0:.6f}\"])\r\n\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\r\n\r\n\tif pyramid_angle_1 >= angle and pyramid_angle_2 >= angle:\r\n\r\n\t\tif cut[\"num_of_seeds\"] == \"max\":\r\n\t\t\tnum_slices = max_slices\r\n\t\telse:\r\n\t\t\tnum_slices = cut[\"num_of_seeds\"] + 1\r\n\t\t\r\n\t\tfor i in range(num_slices):\r\n\t\t\tcutlist = (cutlist\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x0_1 + y_delta,-cut[\"final_dimension_y\"]/2 - block[\"origin_x\"],x1_1 + y_delta,z0_1 + block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 + z1_delta)]] + [[\"c_abs\",\"90\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x0_2 + x_delta,-cut[\"final_dimension_x\"]/2 + block[\"origin_y\"],x1_2 + x_delta,z0_2 + block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_1 - z2_delta)]] + [[\"c_abs\",\"180\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x0_1 - y_delta,-cut[\"final_dimension_y\"]/2 + block[\"origin_x\"],x1_1 - y_delta,z0_1 - block[\"origin_y\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_1,taper_y_1,taper_straight,layers_1)\r\n\t\t\t\t\t\t + [[\"z_abs\",str(z0_2 - z1_delta)]] + [[\"c_abs\",\"270\"]]\r\n\t\t\t\t\t\t + pyramid_slice(cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x0_2 - x_delta,-cut[\"final_dimension_x\"]/2 - block[\"origin_y\"],x1_2 - x_delta,z0_2 - block[\"origin_x\"],laser[\"xy_spacing\"], laser[\"z_spacing\"], taper_x_2,taper_y_2,taper_straight,layers_2)\r\n\t\t\t\t\t\t )\r\n\t\t\tz0_1 = z0_1 + z_shift\r\n\t\t\tz0_2 = z0_2 + z_shift\r\n\t\t\tx0_1, x1_1, x0_2, x1_2 = x0_1 - x_shift, x1_1 - x_shift, x0_2 - x_shift, x1_2 - x_shift\r\n\t\t\tcutlist.append([\"c_abs\",str(block[\"physical_rotation\"])])\r\n\t\t\tcutlist.append([\"z_abs\",str(z0_1 + z2_delta)])\t\r\n\telse:\r\n\t\traise Exception(\"Pyramid angle too small\")\r\n\r\n\tcutlist.insert(0, [\"set_trigger4\", \"1\", \"0\", \"7\", \"8\", \"45\"])\r\n\tcutlist.append([\"stop_trigger\"])\r\n\treturn json.dumps(cutlist)",
"def BrockBird_scaling(fluid,network,propname,sigma_o,To,**params):\n Tc = fluid.get_pore_data(prop='Tc')\n Ti = network.get_pore_data(phase=fluid,prop='temperature')\n Tro = To/Tc\n Tri = Ti/Tc\n value = sigma_o*(1-Tri)**(11/9)/(1-Tro)**(11/9)\n network.set_pore_data(phase=fluid,prop=propname,data=value)",
"def __init__(self, connection_type, steel, beam_dead_load, beam_live_load, span,\r\n left_beam=None, right_beam=None, top_column=None, bottom_column=None):\r\n self.connection_type = connection_type\r\n # The dictionary used to store the RBS dimensions\r\n self.left_RBS_dimension = {}\r\n self.right_RBS_dimension = {}\r\n # The dictionary used to store the probable moment\r\n self.moment = {}\r\n # The dictionary used to store the shear force\r\n self.shear_force = {} # keys:\r\n # A scalar used to denote the doubler plate thickness\r\n self.doubler_plate_thickness = 0\r\n # A dictionary used to store the failure mode (if any)\r\n self.is_feasible = {} # keys: 'geometry limit', 'flexural strength', 'shear strength', 'SCWB'\r\n # Define a boolean flag which denotes the overall check results (True means OK.)\r\n self.flag = None\r\n\r\n # Call methods to initialize the attributes listed above\r\n self.check_column_beam(connection_type, left_beam, right_beam, top_column, bottom_column)\r\n self.extract_reduced_beam_section(connection_type, left_beam, right_beam)\r\n self.compute_probable_moment_RBS(connection_type, steel, left_beam, right_beam)\r\n self.compute_shear_force_RBS(connection_type, beam_dead_load, beam_live_load, span, bottom_column)\r\n self.compute_probable_moment_column_face(connection_type)\r\n self.compute_plastic_moment(connection_type, steel, left_beam, right_beam)\r\n self.check_moment_column_face(connection_type)\r\n self.check_shear_strength(connection_type, beam_dead_load, beam_live_load, left_beam, right_beam)\r\n self.check_column_beam_relationships(connection_type, steel, left_beam, right_beam, top_column, bottom_column)\r\n self.determine_doubler_plate(connection_type, steel, left_beam, right_beam, bottom_column, top_column)",
"def rigid_rings(self):\n raise NotImplementedError",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def pad(self):\n if self._mg_problem.boundaries[0] == 'periodic':\n # left side\n self.left[:] = self.mid[-self.borders[0]:]\n # right side\n self.right[:] = self.mid[:self.borders[1]]\n elif self._mg_problem.boundaries[0] == 'dirichlet':\n\n # left from border\n l_f_b = self.space_tensor[0:self.borders[0]]\n # right_from_border\n r_f_b = self.space_tensor[-self.borders[1]:]\n # left side\n self.left[:] = self.fl(l_f_b)\n # right side\n self.right[:] = self.fr(r_f_b)"
] | [
"0.6122613",
"0.60881704",
"0.5706191",
"0.5586057",
"0.554519",
"0.55225635",
"0.55225635",
"0.54242057",
"0.54017335",
"0.5394755",
"0.5353252",
"0.52999943",
"0.52834505",
"0.52750146",
"0.524787",
"0.5220676",
"0.5217967",
"0.51952493",
"0.51629597",
"0.5109538",
"0.5100428",
"0.5094992",
"0.5077157",
"0.50667536",
"0.5065042",
"0.50447977",
"0.50394416",
"0.50359344",
"0.50189936",
"0.5017399"
] | 0.66334 | 0 |
Create stretch point constraints on a chain of stretch joints.
div = 1.0 / (len(twist_jnts)+1)
for i, joint in enumerate(twist_jnts):
weight = div*(i+1)
mc.pointConstraint(start_jnt, joint, weight=1.0-weight)
mc.pointConstraint(end_jnt, joint, weight=weight) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def multi_joint_stretch(ik_ctrl, ik_last_node, switch_ctrl, fk_ctrls, jnts, ik_handle):\n\n root_grp = utils.get_parent(jnts[0])\n stretch_jnts = jnts[1:]\n stretch_fk_ctrls = fk_ctrls[1:]\n\n # create attrs\n attrs = ['upStretch','loStretch']\n for i in reversed(range(len(stretch_jnts)-2)):\n ltr = ''\n if i > 0:\n ltr = utils.letters[i]\n\n attrs.insert(1, 'midStretch'+ltr)\n\n if not mc.objExists(ik_ctrl+'.autoStretch'):\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n\n for i in range(len(stretch_jnts)):\n if not mc.objExists(ik_ctrl+'.'+attrs[i]):\n mc.addAttr(ik_ctrl, ln=attrs[i], at='double', dv=1, min=0.001, k=1)\n\n for fk_ctrl in fk_ctrls[:-1]:\n if not mc.objExists(fk_ctrl+'.stretch'):\n mc.addAttr(fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n init_lengths = [mc.getAttr(j+'.tx') for j in stretch_jnts]\n abs_init_lengths = [abs(v) for v in init_lengths]\n\n total_init_length = 0\n for v in init_lengths:\n total_init_length += v\n\n abs_total_init_length = abs(total_init_length)\n\n # Create dist reader\n root_to_end_dist = utils.create_distance_reader(root_grp, ik_last_node)\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.addAttr(ik_ctrl, ln='stretchFactor', k=0)\n mc.connectAttr(auto_stretch_clamp+'.inputR', ik_ctrl+'.stretchFactor')\n\n pma = mc.createNode('plusMinusAverage')\n utils.connect_abs(pma+'.output1D', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(jnts[0]+'.softIkChainLength'):\n\n # compensate chain length - feed in new chain length for soft ik chain length\n utils.connect_abs(pma+'.output1D', jnts[0]+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto stretch\n mc.connectAttr(ik_ctrl+'.autoStretch', pc+'.w1')\n utils.connect_reverse(pc+'.w1', pc+'.w0')\n\n # easy stuff first - create fk stretch nodes\n fk_to_ik_blends = [] # This is the final output for IK stretch\n\n for i, jnt in enumerate(stretch_jnts):\n\n # easy stuff first - create fk stretch nodes\n fk_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr(fk_mdl+'.input1', mc.getAttr(jnt+'.tx'))\n mc.connectAttr(fk_ctrls[i]+'.stretch', fk_mdl+'.input2')\n utils.connect_abs(fk_mdl+'.output', fk_ctrls[i+1]+'_ZERO.tx')\n\n # Create user secifed IK stretch\n user_ik_scale_mdl = mc.createNode('multDoubleLinear')\n mc.setAttr( user_ik_scale_mdl+'.input1', init_lengths[i])\n mc.connectAttr(ik_ctrl+'.'+attrs[i], user_ik_scale_mdl+'.input2')\n\n # Now create the IK auto stretch nodes\n auto_stretch_mdl = mc.createNode('multDoubleLinear')\n mc.connectAttr(user_ik_scale_mdl+'.output', auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(user_ik_scale_mdl+'.output', '{0}.input1D[{1}]'.format(pma, i))\n\n fk_to_ik_blend = mc.createNode('blendTwoAttr')\n auto_stretch_blend = mc.createNode('blendTwoAttr')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.attributesBlender')\n mc.connectAttr(fk_mdl+'.output', fk_to_ik_blend+'.input[0]')\n mc.connectAttr(auto_stretch_blend+'.output', fk_to_ik_blend+'.input[1]')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_stretch_blend+'.attributesBlender')\n mc.connectAttr(user_ik_scale_mdl+'.output', 
auto_stretch_blend+'.input[0]')\n mc.connectAttr(auto_stretch_mdl+'.output', auto_stretch_blend+'.input[1]')\n\n fk_to_ik_blends.append(fk_to_ik_blend+'.output')\n\n for i, jnt in enumerate(stretch_jnts):\n mc.connectAttr(fk_to_ik_blends[i], jnt+'.tx')",
"def gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n obj = qad_utils.whatGeomIs(0, geom)\n if (type(obj) != list and type(obj) != tuple):\n objType = obj.whatIs()\n if objType == \"CIRCLE\": # se é cerchio\n newCircle = gripStretchCircle(obj, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newCircle is not None:\n return QgsGeometry.fromPolyline(newCircle.asPolyline(tolerance2ApproxCurve))\n elif objType == \"ARC\": # se é arco\n newArc = gripStretchArc(obj, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n if newArc is not None:\n return QgsGeometry.fromPolyline(newArc.asPolyline(tolerance2ApproxCurve))\n return None\n \n linearObjectListToStretch = qad_utils.QadLinearObjectList()\n linearObjectListToStretch.fromPolyline(geom.asPolyline())\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n \n pts = linearObjectListToStretch.asPolyline(tolerance2ApproxCurve)\n stretchedGeom = QgsGeometry.fromPolyline(pts) \n \n return stretchedGeom",
"def ar_addStretchSquash():\n setupName = 'Nose'\n sel = cmds.ls(sl=True)\n chain = cmds.ls(sel[0], dag=True, typ='joint')\n IKSpine = cmds.ikHandle(sj=chain[0], ee=chain[len(chain) - 1], sol='ikSplineSolver')\n # rename\n cmds.rename(IKSpine[0], 'IKSplineHandle_' + setupName)\n cmds.rename(IKSpine[1], 'IKSplineEff_' + setupName)\n cmds.rename(IKSpine[2], 'IKSplineCurve_' + setupName)\n # create new joints.\n cmds.select(cl=True)\n bindStartJt = cmds.joint(n='JtCrvBind01')\n cmds.select(cl=True)\n bindEndJt = cmds.joint(n='JtCrvBind02')\n cmds.delete(cmds.parentConstraint(chain[0], bindStartJt))\n cmds.delete(cmds.parentConstraint(chain[len(chain) - 1], bindEndJt))\n\n cmds.skinCluster(bindStartJt, bindEndJt, 'IKSplineCurve_' + setupName, bm=0, sm=0, nw=1, wd=0, mi=2)\n ctlStart = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '01_CTRL', ch=False)\n extraGrp = cmds.createNode('transform', n='Toony' + setupName + '01ExtraGrp')\n offGrp = cmds.createNode('transform', n='Toony' + setupName + '01OffsetGrp')\n cmds.parent(ctlStart[0], extraGrp)\n cmds.parent(extraGrp, offGrp)\n cmds.delete(cmds.parentConstraint(bindStartJt, offGrp))\n # endJOint\n ctlEnd = cmds.circle(nr=[1, 0, 0], n='Toony' + setupName + '02_CTRL', ch=False)\n extraGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02ExtraGrp')\n offGrpEnd = cmds.createNode('transform', n='Toony' + setupName + '02OffsetGrp')\n cmds.parent(ctlEnd[0], extraGrpEnd)\n cmds.parent(extraGrpEnd, offGrpEnd)\n cmds.delete(cmds.parentConstraint(bindEndJt, offGrpEnd))\n # parent constraint wiht bind joints.\n cmds.parentConstraint(ctlStart[0], bindStartJt)\n cmds.parentConstraint(ctlEnd[0], bindEndJt)\n # Create connection with node basis.\n crvInfo = cmds.createNode('curveInfo', n='curveInfo_Toony' + setupName)\n shpCrv = cmds.listRelatives('IKSplineCurve_' + setupName, s=True)\n cmds.connectAttr(shpCrv[0] + '.worldSpace[0]', crvInfo + '.inputCurve', f=True)\n mdnForSX = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleX')\n mdnForPW = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_Power')\n mdnForYZ = cmds.createNode('multiplyDivide', n='multiplyDivide_Toony' + setupName + '_ScaleYZ')\n cmds.setAttr(mdnForSX + '.operation', 2)\n cmds.setAttr(mdnForPW + '.operation', 3)\n cmds.setAttr(mdnForYZ + '.operation', 2)\n # connections.\n cmds.connectAttr(crvInfo + '.arcLength', mdnForSX + '.input1X', f=True)\n cmds.setAttr(mdnForSX + '.input2X', cmds.getAttr(mdnForSX + '.input1X'))\n scaledJoint = chain[:-1]\n for each in scaledJoint:\n cmds.connectAttr(mdnForSX + '.outputX', each + '.sx', f=True)\n # power connections.\n cmds.connectAttr(mdnForSX + '.outputX', mdnForPW + '.input1X', f=True)\n cmds.setAttr(mdnForPW + '.input2X', 0.5)\n cmds.connectAttr(mdnForPW + '.outputX', mdnForYZ + '.input2X', f=True)\n cmds.setAttr(mdnForYZ + '.input1X', 1)\n for each in scaledJoint:\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sy')\n cmds.connectAttr(mdnForYZ + '.outputX', each + '.sz')\n # TODO: need to full proof this function.",
"def biped_stretch(ik_ctrl,\n ik_last_node,\n pv_ctrl,\n switch_ctrl,\n up_arm_fk_ctrl,\n lo_arm_fk_ctrl,\n wrist_fk_ctrl,\n up_arm_ik_jnt,\n lo_arm_ik_jnt,\n wrist_ik_jnt,\n ik_handle,\n pin_attr_name='pinElbow',\n shift_attr_name='shiftElbow'):\n\n # add all my attrs on ctrls\n mc.addAttr(ik_ctrl, ln=pin_attr_name, at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln=shift_attr_name, at='double', min=-1, max=1, k=1)\n\n mc.addAttr(ik_ctrl, ln='autoStretch', at='double', min=0, max=1, k=1)\n mc.addAttr(ik_ctrl, ln='upStretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(ik_ctrl, ln='loStretch', at='double', dv=1, min=0.001, k=1)\n\n mc.addAttr(up_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n mc.addAttr(lo_arm_fk_ctrl, ln='stretch', at='double', dv=1, min=0.001, k=1)\n\n # store initial length of joint\n lo_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')\n wrist_init_length = mc.getAttr(wrist_ik_jnt+'.tx')\n max_init_length = mc.getAttr(lo_arm_ik_jnt+'.tx')+mc.getAttr(wrist_ik_jnt+'.tx')\n\n lo_abs_init_length = abs(mc.getAttr(lo_arm_ik_jnt+'.tx'))\n wrist_abs_length = abs(mc.getAttr(wrist_ik_jnt+'.tx'))\n\n # Get parents for ik handle and root of the parm\n arm_root_grp = utils.get_parent(up_arm_ik_jnt)\n\n # Create distance nodes between base, end, and pv ctrl to get the length of side of the triangle\n root_to_end_dist = utils.create_distance_reader(arm_root_grp, ik_last_node)\n root_to_pv_dist = utils.create_distance_reader(arm_root_grp, pv_ctrl)\n pv_to_end_dist = utils.create_distance_reader(pv_ctrl, ik_last_node)\n\n # easy stuff first - create fk stretch nodes\n lo_arm_fk_mdl = mc.createNode('multDoubleLinear')\n wrist_fk_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_fk_mdl+'.input1', mc.getAttr(lo_arm_ik_jnt+'.tx'))\n mc.setAttr(wrist_fk_mdl+'.input1', mc.getAttr(wrist_ik_jnt+'.tx'))\n mc.connectAttr(up_arm_fk_ctrl+'.stretch', lo_arm_fk_mdl+'.input2')\n mc.connectAttr(lo_arm_fk_ctrl+'.stretch', wrist_fk_mdl+'.input2')\n\n utils.connect_abs(lo_arm_fk_mdl+'.output', lo_arm_fk_ctrl+'_ZERO.tx')\n if wrist_fk_ctrl and mc.objExists(wrist_fk_ctrl):\n utils.connect_abs(wrist_fk_mdl+'.output', wrist_fk_ctrl+'_ZERO.tx')\n\n # These arethe final fk stretch outputs to connect to joints\n fk_stretch_final_output = [lo_arm_fk_mdl+'.output', wrist_fk_mdl+'.output']\n\n # NOW creates node s for thew elbow pin\n lo_arm_pin_mdl = mc.createNode('multDoubleLinear')\n wrist_pin_mdl = mc.createNode('multDoubleLinear')\n\n mc.setAttr(lo_arm_pin_mdl+'.input1', 1)\n mc.setAttr(wrist_pin_mdl+'.input1', 1)\n\n if lo_init_length < 0.0:\n mc.setAttr(lo_arm_pin_mdl+'.input1', -1)\n\n if wrist_init_length < 0.0:\n mc.setAttr(wrist_pin_mdl+'.input1', -1)\n\n mc.connectAttr(root_to_pv_dist+'.localDistance', lo_arm_pin_mdl+'.input2')\n mc.connectAttr(pv_to_end_dist+'.localDistance', wrist_pin_mdl+'.input2')\n\n # These arethe final elbow pin stretch outputs to connect to joints\n pin_final_output = [lo_arm_pin_mdl+'.output', wrist_pin_mdl+'.output']\n\n # create shift nodes\n mc.addAttr(lo_arm_ik_jnt, ln='shiftLength', k=1)\n mc.addAttr(wrist_ik_jnt, ln='shiftLength', k=1)\n\n tt = 'linear'\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=lo_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=0, itt=tt, ott=tt)\n mc.setDrivenKeyframe(lo_arm_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=max_init_length, itt=tt, ott=tt)\n\n 
mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=0, v=wrist_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=1, v=max_init_length, itt=tt, ott=tt)\n mc.setDrivenKeyframe(wrist_ik_jnt+'.shiftLength', cd=ik_ctrl+'.'+shift_attr_name, dv=-1, v=0, itt=tt, ott=tt)\n\n shift_final_output = [ lo_arm_ik_jnt+'.shiftLength', wrist_ik_jnt+'.shiftLength']\n\n # Create ik indivisual stretch nodes\n lo_arm_ik_scale_mdl = mc.createNode('multDoubleLinear')\n wrist_ik_scale_mdl = mc.createNode('multDoubleLinear')\n\n mc.connectAttr(shift_final_output[0], lo_arm_ik_scale_mdl+'.input1')\n mc.connectAttr(shift_final_output[1], wrist_ik_scale_mdl+'.input1')\n mc.connectAttr(ik_ctrl+'.upStretch', lo_arm_ik_scale_mdl+'.input2')\n mc.connectAttr(ik_ctrl+'.loStretch', wrist_ik_scale_mdl+'.input2')\n\n # This is the final output for scale and shift\n ik_stretch_final_output = [lo_arm_ik_scale_mdl+'.output', wrist_ik_scale_mdl+'.output']\n\n # Now create the IK auto stretch nodes\n lo_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n wrist_auto_stretch_mdl = mc.createNode('multDoubleLinear')\n\n auto_stretch_clamp = mc.createNode('clamp')\n mc.setAttr(auto_stretch_clamp+'.minR', 1)\n mc.setAttr(auto_stretch_clamp+'.maxR', 10000000)\n\n mc.connectAttr(ik_stretch_final_output[0], lo_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(ik_stretch_final_output[1], wrist_auto_stretch_mdl+'.input1', f=1)\n mc.connectAttr(root_to_end_dist+'.stretchFactor', auto_stretch_clamp+'.inputR')\n\n mc.connectAttr(auto_stretch_clamp+'.outputR', lo_auto_stretch_mdl+'.input2', f=1)\n mc.connectAttr(auto_stretch_clamp+'.outputR', wrist_auto_stretch_mdl+'.input2', f=1)\n\n adl = mc.createNode('addDoubleLinear')\n mc.connectAttr(lo_arm_ik_scale_mdl+'.output', adl+'.input1')\n mc.connectAttr(wrist_ik_scale_mdl+'.output', adl+'.input2')\n utils.connect_abs(adl+'.output', root_to_end_dist+'.jointChainLength')\n\n # handle soft ik handle constraint override\n pc = mc.pointConstraint(ik_last_node, ik_handle)[0]\n if mc.objExists(up_arm_ik_jnt+'.softIkChainLength'):\n\n # compensate feed in new chain length for soft ik chain length\n utils.connect_abs(adl+'.output', up_arm_ik_jnt+'.softIkChainLength')\n\n # blend off the soft ik constraint IF im in auto s tretch or pin mode\n mdl = mc.createNode('multDoubleLinear')\n utils.connect_reverse(ik_ctrl+'.'+pin_attr_name, mdl+'.input1')\n utils.connect_reverse(ik_ctrl+'.autoStretch', mdl+'.input2')\n mc.connectAttr(mdl+'.output', pc+'.w0')\n utils.connect_reverse(pc+'.w0', pc+'.w1')\n\n ik_auto_stretch_final_output = [lo_auto_stretch_mdl+'.output', wrist_auto_stretch_mdl+'.output']\n\n # now create all my blends\n\n # first blend btween FK and an empty ik input\n # (this ikl input will take another blend node for blending oall the IK options )\n fk_to_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(switch_ctrl+'.IK', fk_to_ik_blend+'.blender')\n mc.connectAttr(fk_stretch_final_output[0], fk_to_ik_blend+'.color2R')\n mc.connectAttr(fk_stretch_final_output[1], fk_to_ik_blend+'.color2G')\n\n # now create a blender between pin elbow and the rest of the ik options\n auto_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.autoStretch', auto_ik_blend+'.blender')\n mc.connectAttr(ik_auto_stretch_final_output[0], auto_ik_blend+'.color1R')\n mc.connectAttr(ik_auto_stretch_final_output[1], auto_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n 
mc.connectAttr(auto_ik_blend+'.outputR', fk_to_ik_blend+'.color1R')\n mc.connectAttr(auto_ik_blend+'.outputG', fk_to_ik_blend+'.color1G')\n\n # now create a blender between pin elbow and the rest of the ik options\n pin_ik_blend = mc.createNode('blendColors')\n\n mc.connectAttr(ik_ctrl+'.'+pin_attr_name, pin_ik_blend+'.blender')\n mc.connectAttr(pin_final_output[0], pin_ik_blend+'.color1R')\n mc.connectAttr(pin_final_output[1], pin_ik_blend+'.color1G')\n\n # Now connect it toth fk blend\n mc.connectAttr(pin_ik_blend+'.outputR', auto_ik_blend+'.color2R')\n mc.connectAttr(pin_ik_blend+'.outputG', auto_ik_blend+'.color2G')\n\n # now connect the shift and scale\n mc.connectAttr(ik_stretch_final_output[0], pin_ik_blend+'.color2R')\n mc.connectAttr(ik_stretch_final_output[1], pin_ik_blend+'.color2G')\n\n # now for the magic! Connect the blend networll to joints\n mc.connectAttr(fk_to_ik_blend+'.outputR', lo_arm_ik_jnt+'.tx')\n mc.connectAttr(fk_to_ik_blend+'.outputG', wrist_ik_jnt+'.tx')",
"def gripStretchQgsGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n wkbType = geom.wkbType()\n if wkbType == QGis.WKBPoint or wkbType == QGis.WKBPoint25D:\n pt = stretchPoint(geom.asPoint(), ptListToStretch, offSetX, offSetY)\n if pt is not None:\n return QgsGeometry.fromPoint(pt)\n \n if wkbType == QGis.WKBMultiPoint:\n stretchedGeom = QgsGeometry(geom)\n points = stretchedGeom.asMultiPoint() # vettore di punti\n atSubGeom = 0\n for pt in points:\n subGeom = QgsGeometry.fromPoint(pt)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n\n if wkbType == QGis.WKBLineString:\n return gripStretchQgsLineStringGeometry(geom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n \n if wkbType == QGis.WKBMultiLineString:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asMultiPolyline() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBPolygon:\n stretchedGeom = QgsGeometry(geom)\n lines = stretchedGeom.asPolygon() # lista di linee\n atSubGeom = 0\n for line in lines: \n subGeom = QgsGeometry.fromPolyline(line)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n if wkbType == QGis.WKBMultiPolygon:\n stretchedGeom = QgsGeometry(geom)\n polygons = geom.asMultiPolygon() # vettore di poligoni\n atSubGeom = 0\n for polygon in polygons:\n subGeom = QgsGeometry.fromPolygon(polygon)\n stretchedSubGeom = gripStretchQgsGeometry(subGeom, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve)\n stretchedGeom = qad_utils.setSubGeom(stretchedGeom, stretchedSubGeom, [atSubGeom]) \n atSubGeom = atSubGeom + 1\n return stretchedGeom\n \n return None",
"def gripStretchCircle(circle, basePt, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n newCenter = QgsPoint(circle.center)\n newRadius = circle.radius\n \n for ptToStretch in ptListToStretch:\n if qad_utils.ptNear(ptToStretch, circle.center): # se i punti sono sufficientemente vicini\n newCenter.set(circle.center.x() + offSetX, circle.center.y() + offSetY)\n elif circle.isPtOnCircle(ptToStretch):\n newPt = QgsPoint(basePt.x() + offSetX, basePt.y() + offSetY)\n newRadius = qad_utils.getDistance(circle.center, newPt)\n\n newCircle = qad_circle.QadCircle()\n if newCircle.set(newCenter, newRadius) == False:\n return None\n \n return newCircle",
"def generate_constraints_between_chains(self):\n node_to_chain_mapping = defaultdict(set)\n # collect all places where each node is used and at what subchain index\n for chain_idx in range(len(self.chains)):\n chain = self.chains[chain_idx]\n for subchain_idx in range(len(chain)):\n parent, child = chain[subchain_idx]\n node_to_chain_mapping[parent].add(\n AbstractConstraint(chain_idx, subchain_idx)\n )\n # don't forget about the final child in the chain (parents are already accounted for)\n final_parent, final_child = chain[-1]\n node_to_chain_mapping[final_child].add(\n AbstractConstraint(chain_idx, len(chain))\n )\n # our final mapping correlates constraints on a per-chain basis\n # e.g. for chain index 0 at subchain index 1, we have a constraint (shared node) in chain 2\n chain_constraints = list()\n for chain_idx in range(len(self.chains)):\n chain = self.chains[chain_idx]\n chain_constraint = [set() for i in range(len(chain) + 1)]\n for subchain_idx in range(len(chain)):\n parent, child = chain[subchain_idx]\n node_constraints = node_to_chain_mapping[parent]\n for constraint in node_constraints:\n if constraint.chain_index != chain_idx:\n chain_constraint[subchain_idx].add(constraint)\n # don't forget about the final child in the chain (parents are already accounted for)\n final_parent, final_child = chain[-1]\n node_constraints = node_to_chain_mapping[final_child]\n for constraint in node_constraints:\n if constraint.chain_index != chain_idx:\n chain_constraint[len(chain)].add(constraint)\n chain_constraints.append(chain_constraint)\n return chain_constraints",
"def stretch(points, stretches=[1, 1]):\n x = stretches[0] * points[0]\n y = stretches[1] * points[1]\n return [x, y]",
"def _adjust_constraints(self, point):\n logger.info(f'Adjusting constraints on point {len(self)}')\n\n # Flat list of all the atom indexes involved in the bonds\n atom_idxs = [i for bond in self.bonds for i in bond]\n\n max_step, min_step = ade.Config.max_step_size, ade.Config.min_step_size\n\n for bond in self.bonds:\n (i, j), coords = bond.atom_indexes, self[-1].species.coordinates\n\n # Normalised r_ij vector\n vec = coords[j] - coords[i]\n vec /= np.linalg.norm(vec)\n\n # Calculate |∇E_i·r| i.e. the gradient along the bond. Positive\n # values are downhill in energy to form the bond and negative\n # downhill to break it\n gradi = np.dot(self[-1].grad[i], vec) # |∇E_i·r| bond midpoint\n gradj = np.dot(self[-1].grad[j], -vec)\n\n # Exclude gradients from atoms that are being substituted\n if atom_idxs.count(i) > 1:\n grad = gradj\n elif atom_idxs.count(j) > 1:\n grad = gradi\n else:\n grad = np.average((gradi, gradj))\n\n logger.info(f'|∇E_i·r| = {grad:.4f} on {bond}')\n\n # Downhill in energy to break/form this breaking/forming bond\n if grad * np.sign(bond.dr) > 0:\n dr = np.sign(bond.dr) * ade.Config.max_step_size\n\n # otherwise use a scaled value, depending on the gradient\n # large values will have small step sizes, down to min_step Å\n else:\n dr = (max_step - min_step) * np.exp(-(grad/0.05)**2) + min_step\n dr *= np.sign(bond.dr)\n\n new_dist = point.species.distance(*bond.atom_indexes) + dr\n\n # No need to go exceed final distances on forming/breaking bonds\n if bond.forming and new_dist < bond.final_dist:\n new_dist = bond.final_dist\n\n elif bond.breaking and new_dist > bond.final_dist:\n new_dist = bond.final_dist\n\n else:\n logger.info(f'Using step {dr:.3f} Å on bond: {bond}')\n\n point.constraints[bond.atom_indexes] = new_dist\n\n return None",
"def gripStretchQgsLinearObjectList(linearObjectList, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve):\n linearObjectListToStretch = qad_utils.QadLinearObjectList(linearObjectList)\n \n atPart = 0\n while atPart < linearObjectListToStretch.qty():\n linearObject = linearObjectListToStretch.getLinearObjectAt(atPart) \n if linearObject.isSegment():\n pt = linearObject.getStartPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto iniziale \n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setStartPt(pt)\n \n pt = linearObject.getEndPt()\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n # cambio punto finale\n pt.setX(pt.x() + offSetX)\n pt.setY(pt.y() + offSetY)\n linearObject.setEndPt(pt)\n else: # se è arco\n newArc, newInverseFlag = gripStretchArc(linearObject.getArc(), ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, linearObject.isInverseArc())\n if newArc is None:\n return None\n linearObject.setArc(newArc, newInverseFlag)\n\n atPart = atPart + 1\n \n pt = linearObjectListToStretch.getCentroid(tolerance2ApproxCurve) # verifico se polilinea ha un centroide\n if pt is not None:\n if isPtContainedForStretch(pt, ptListToStretch): # se il punto è contenuto in ptListToStretch\n linearObjectListToStretch.move(offSetX, offSetY)\n\n return linearObjectListToStretch",
"def make_shaped_repertoire(RNs):\n # get objective distribution\n bin_edges, obj_dist, volume = objective_distribution()\n # get an antigenic epitope sequence, and in case of nkey=1,2 check whether\n # it can populate all required bins, thus avoiding infinite loop below\n AgEpitope = get_AgEpitope(RNs)\n if cf.nkey == 1 or cf.nkey == 2:\n while 1:\n # get list of all possible binding partners and their energies\n all_partners = get_all_partners()\n all_energies = [E_best(partner, AgEpitope)\n for partner in all_partners]\n # check whether all bins are occupiable with these energies,\n # if not, get new epitope sequence\n indices = np.digitize(all_energies, bin_edges, right=True)\n ind_set = set(indices)\n ind_set.discard(0)\n # if all bins can be occupied, move on\n if ind_set == set(range(1, len(bin_edges))):\n break\n # else get a new epitope and check its validity\n else:\n AgEpitope = get_AgEpitope(RNs)\n # initialise empty list for counting how many seqs have been found per bin\n ist_dist = np.zeros(len(obj_dist))\n # seq_list for collecting identified sequences\n seq_list = []\n E_list = []\n # while ist_dist and obj_dist are not equal, get new sequences and position\n # them if they are useful\n # introduce a tolerance of how far bins are allowed to deviate from the\n # goal, as otherwise runtime explodes due to very long waiting times for\n # high binding energy codes in large nkey cases - allow an absolute\n # deviation of volume*tolerance % for each bin.\n abs_tol = volume * 0.005\n while np.sum(np.abs((ist_dist-obj_dist)) > abs_tol) > 0:\n ab = Ab_seq(RNs)\n Emax = E_best(ab, AgEpitope)\n # find index bin of this energy\n indx = np.digitize(Emax, bin_edges, right=True)\n # if the index is in the useful range and the bin is not yet full,\n # count the sequence and store it\n if indx in range(1, len(bin_edges)):\n if obj_dist[indx-1] - ist_dist[indx-1] > 0:\n ist_dist[indx-1] += 1\n seq_list.append(ab)\n E_list.append(Emax)\n\n return seq_list, E_list, AgEpitope",
"def full_strain(x, dof):\n base = np.zeros([6, dof])\n\n if dof % 2 == 1:\n bending_start = 1\n base[0, 0] = 1 # constant torsion\n else:\n bending_start = 2\n base[0, 0] = 1 # constant torsion\n base[0, 1] = x # linear torsion\n\n base[1, bending_start] = 1 # y-bending\n base[1, bending_start + 1] = x # linear y-bending\n\n if dof <= 6:\n base[2, bending_start + 2] = 1 # z-bending\n base[2, bending_start + 3] = x # linear z-bending\n else:\n base[1, bending_start + 2] = x ** 2 # quadratic y-bending\n base[2, bending_start + 3] = 1 # z-bending\n base[2, bending_start + 4] = x # linear z-bending\n base[2, bending_start + 5] = x ** 2 # quadratic z-bending\n return base",
"def generate_constraints():\n return list(chain(collect_rows(), collect_columns(), collect_blocks()))",
"def make_stair(nstep,treadDept,riserHeight,landingLength,stepWidth,n):\n\tstep = MKPOL([[[0,0],[0,riserHeight],[2*treadDept,riserHeight], [treadDept,0]],[[1,2,3,4]],1])\n\tstep1 = MKPOL([[[0,0],[0,riserHeight],[treadDept,2*riserHeight], [treadDept,riserHeight]],[[1,2,3,4]],1])\n\tstep = PROD([QUOTE([stepWidth]),step])\n\tstep = TEXTURE(\"texture/Liptus.jpg\")(step)\n\thandrailTop = PROD([QUOTE([stepWidth/15.0]),step1])\n\thandrail = CIRCLE(stepWidth/30.0)([20,20])\n\n\thandrail = PROD([QUOTE([1]),handrail])\n\n\thandrail = R([1,3])(PI/2)(handrail)\n\thandrail = T([1,2,3])([stepWidth-(stepWidth/30.0),treadDept/2,riserHeight])(handrail)\n\thandrail = COLOR(BLACK)(handrail)\n\tstep = STRUCT([step,handrail])\n\thandrailTop = R([2,3])(PI)(handrailTop)\n\thandrailTop = T([1,2,3])([stepWidth-(stepWidth/15.0),treadDept,1+2*riserHeight])(handrailTop)\n\thandrailTop = TEXTURE(\"texture/Liptus.jpg\")(handrailTop)\n\tstep = STRUCT([step,handrailTop])\n\tstair = [step]\n\tif n == 0:\n\t\tstair = []\n\t\"\"\" realization total step \"\"\"\n\tfor i in range(nstep):\n\t\tstep = T([2,3])([treadDept,riserHeight])(step)\n\t\tstair.append(step)\n\tfinalStep = T([2,3])([(treadDept*(nstep+1)),(riserHeight*(nstep))])(CUBOID([stepWidth,landingLength,riserHeight]))\n\tfinalStep = TEXTURE(\"texture/Liptus.jpg\")(finalStep)\n\tstair.append(finalStep)\n\treturn STRUCT(stair)",
"def createSpSwConstraint(parents, target, enumNames, niceNames=['Space'],constrType='parent',constrTarget=''):\n if constrTarget == '':\n if target.endswith('_CTRL'):\n stripName=target.rpartition('_')\n constrTarget=stripName[0]+'Ctrl_ROOT'\n else:\n constrTarget=target\n\n if niceNames <= 1:\n niceName=niceNames\n else:\n niceName=''\n for i,x in enumerate(niceNames):\n if i < len(niceNames)-1:\n niceName=niceName+x+' / '\n else:\n niceName=niceName+x\n\n existingAttr=cmds.listAttr(target)\n constr=eval('cmds.'+constrType+'Constraint(parents,constrTarget,mo=True)')\n if 'spSwSep' not in existingAttr:\n cmds.addAttr(target, ln='spSwSep', nn='___ Space Switching', at='enum', en='___', k=True)\n cmds.addAttr(target, ln='spaceSwitch', nn=niceName+' Switch', at='enum', en=enumNames, k=True)\n for i,x in enumerate(parents):\n if not i == 1:\n rev=cmds.createNode('reverse', n=target+'spaceSwitch_REV')\n cmds.connectAttr(target+'.spaceSwitch',rev+'.inputX')\n cmds.connectAttr(rev+'.outputX', constr[0]+'.'+x+'W'+str(i))\n else:\n cmds.connectAttr(target+'.spaceSwitch', constr[0]+'.'+x+'W'+str(i))",
"def pk_constrained(self, snr=30, headroom = 0):\n # Initialize\n self.pk = np.zeros((self.n_waves, len(self.controls.k0)), dtype=np.csingle)\n # loop over frequencies\n bar = tqdm(total = len(self.controls.k0), desc = 'Calculating Constrained Optim.')\n for jf, k0 in enumerate(self.controls.k0):\n # get the scaled version of the propagating directions\n k_vec = k0 * self.dir\n # Form the sensing matrix\n h_mtx = np.exp(1j*self.receivers.coord @ k_vec.T)\n H = h_mtx.astype(complex) # cvxpy does not accept floats, apparently\n # measured data\n pm = self.pres_s[:,jf].astype(complex)\n # Performing constrained optmization cvxpy\n x_cvx = cp.Variable(h_mtx.shape[1], complex = True) # create x variable\n # Create the problem\n epsilon = 10**(-(snr-headroom)/10)\n problem = cp.Problem(cp.Minimize(cp.norm2(x_cvx)**2),\n [cp.pnorm(pm - cp.matmul(H, x_cvx), p=2) <= epsilon])\n problem.solve(solver=cp.SCS, verbose=False)\n self.pk[:,jf] = x_cvx.value\n bar.update(1)\n bar.close()",
"def constraints(self) -> Tuple[NDArray, NDArray]:",
"def generate_powerset_bridge_constraints(problem):\n\n c_30 = _dynamic_constraint_30(problem)\n c_33 = _dynamic_constraint_33(problem)\n c_34 = _dynamic_constraint_34(problem)\n c_35 = _dynamic_constraint_35(problem)\n c_36 = _dynamic_constraint_36(problem)\n\n return c_30 & c_33 & c_34 & c_35 & c_36",
"def constraints(self):\n ...",
"def gripStretchArc(arc, ptListToStretch, offSetX, offSetY, tolerance2ApproxCurve, inverseArc = None):\n startPt = arc.getStartPt()\n endPt = arc.getEndPt()\n middlePt = arc.getMiddlePt()\n newStartPt = QgsPoint(startPt)\n newEndPt = QgsPoint(endPt)\n newMiddlePt = QgsPoint(middlePt)\n newCenter = None\n startPtChanged = endPtChanged = middlePtPtChanged = False\n for ptToStretch in ptListToStretch:\n if qad_utils.ptNear(ptToStretch, arc.center): # se i punti sono sufficientemente vicini\n newCenter = QgsPoint(arc.center.x() + offSetX, arc.center.y() + offSetY)\n else:\n if qad_utils.ptNear(startPt, ptToStretch):\n newStartPt.set(startPt.x() + offSetX, startPt.y() + offSetY)\n startPtChanged = True\n elif qad_utils.ptNear(endPt, ptToStretch):\n newEndPt.set(endPt.x() + offSetX, endPt.y() + offSetY)\n endPtChanged = True\n elif qad_utils.ptNear(middlePt, ptToStretch):\n newMiddlePt.set(middlePt.x() + offSetX, middlePt.y() + offSetY)\n middlePtPtChanged = True\n \n newArc = qad_arc.QadArc()\n if newArc.fromStartSecondEndPts(newStartPt, newMiddlePt, newEndPt) == False:\n return None\n \n # se il centro era nei punti di grip\n if newCenter is not None:\n # se i tre punti dell'arco erano nei punti di grip oppure\n # allora non cambio il centro\n if (startPtChanged and endPtChanged and middlePtPtChanged):\n pass\n else:\n newArc.center.set(newCenter.x(), newCenter.y())\n \n if inverseArc is not None: # se l'arco faceva parte di una linestring\n # verifico il verso del nuovo arco\n if qad_utils.ptNear(newStartPt, newArc.getStartPt()):\n # stesso verso del vecchio arco\n return newArc, inverseArc\n else:\n return newArc, not inverseArc\n \n return newArc",
"def create_cont_constraint_mat_separable(H,v1s,v2s,nSides,nConstraints,nC,\n dim_domain,dim_range,tess):\n if dim_domain != 2:\n raise ValueError\n if dim_range not in [1,2]:\n raise ValueError\n nHomoCoo=dim_domain+1 \n length_Avee = dim_range*nHomoCoo\n L1 = np.zeros((nConstraints/2,nC*nHomoCoo))\n\n \n\n nPtsInSide = 2 # Since, in 2D, the side is always a line joining 2 pts.\n# if nSides != nConstraints/(nPtsInSide*dim_domain):\n# raise ValueError(nSides,nConstraints)\n \n if nSides != nConstraints/(nPtsInSide*dim_range):\n print \" print nSides , nConstraints/(nPtsInSide*dim_range):\"\n print nSides , nConstraints/(nPtsInSide*dim_range)\n ipshell('stop')\n raise ValueError( nSides , (nConstraints,nPtsInSide,dim_range))\n\n \n if nSides != H.shape[0]:\n raise ValueError(nSides,H.shape)\n\n\n# M = nPtsInSide*dim_range\n M = nPtsInSide\n if dim_range == 1:\n raise NotImplementedError\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n # s stands for start\n # e stands for end \n s1 = a*length_Avee \n e1 = s1+nHomoCoo \n s2 = b*length_Avee\n e2 = s2+nHomoCoo \n \n # Constraint 1: \n L[i*M,s1:e1]= v1 \n L[i*M,s2:e2]= -v1 \n # Constraint 2: \n L[i*M+1,s1:e1]= v2 \n L[i*M+1,s2:e2]= -v2 \n \n \n elif dim_range==2:\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n\n if np.allclose(v1,v2):\n raise ValueError(v1,v2)\n\n\n \n \n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n \n\n # L1 is acting on columns of the following form:\n # [ a_1 b_1 c_1 d_1 a_2 b_2 c_2 d_2 ... a_Nc b_Nc c_Nc d_Nc] \n # s stands for start\n # e stands for end \n s1 = a*nHomoCoo\n e1 = s1+nHomoCoo \n s2 = b*nHomoCoo\n e2 = s2+nHomoCoo \n \n \n try: \n # Constraint 1: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v1\n row[s2:e2]=-v1 \n # x component \n L1[i*M]=row \n except:\n ipshell('fail')\n raise \n\n # Constraint 2: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v2\n row[s2:e2]=-v2 \n # x component \n L1[i*M+1]=row\n \n\n \n \n \n \n \n else:\n raise ValueError(dim_range)\n\n \n return L1",
"def create_fk_chain(controls, joints):\n\n # create control offset transforms\n constraints = []\n exp_tf_ms = []\n\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par: cmds.parent(buf, par[0])\n\n exp_tf_ms.append(buf)\n\n for src, trg in zip(controls, joints):\n # constrain fk joints to controls, hide the constraint nodes\n pc = cmds.parentConstraint(src, trg, mo=True)[0]\n cmds.setAttr('{node}.interpType'.format(node=pc), 2)\n cmds.setAttr('{node}.visibility'.format(node=pc), False)\n sc = cmds.scaleConstraint(src, trg)[0]\n cmds.setAttr('{node}.visibility'.format(node=sc), False)\n constraints.extend([pc, sc])\n\n return constraints, exp_tf_ms",
"def generate_all_constraints(traj,policy,mdp):\n #print('generating all constraints')\n constraints = []\n traj_tmp = list(traj)\n #print(traj_tmp)\n #compute halfspace normals for all (s,a) pairs until terminal\n while(len(traj_tmp)>1):\n constraints += generate_half_space_normals(traj_tmp,policy,mdp)\n #print(constraints)\n traj_tmp.pop(0)\n #print('after pop',traj_tmp)\n return constraints",
"def test_tensor_composite_constraints_equal_penalties():\n from pygam.penalties import derivative\n\n def der1(*args, **kwargs):\n kwargs.update({'derivative':1})\n return derivative(*args, **kwargs)\n\n # create a 3D tensor where the penalty should be equal to the constraint\n term = te(0, 1, 2,\n n_splines=[4, 5, 6],\n penalties=der1,\n lam=1,\n constraints='monotonic_inc')\n\n # check all the dimensions\n for i in range(3):\n P = term._build_marginal_penalties(i).A\n C = term._build_marginal_constraints(i,\n -np.arange(term.n_coefs),\n constraint_lam=1,\n constraint_l2=0).A\n\n assert (P == C).all()",
"def initializeConstraints(self):\n constraints = []\n\n for i in range(9):\n for j in range(9):\n for k in range(j+1, 9):\n # initialize row constraints\n constraints.append((i*9 + j, i*9 + k))\n # initialize col constraints\n constraints.append((j*9 + i, k*9 + i))\n\n # initialize square constraints\n pos1 = i * 9 + j\n for diff in [7, 8, 10, 11, 16, 17, 19, 20]:\n pos2 = i * 9 + j + diff\n if self.inSameSquare(pos1, pos2):\n constraints.append((pos1, pos2))\n\n return constraints",
"def linear_strain(x, dof):\n base = np.zeros([6, dof])\n base[1, 0] = 1 # initial y-bending\n if dof > 2:\n base[1, 1] = x # linear y-bending term\n base[2, dof-1] = x # linear z-bending term\n return base",
"def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared 
separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n 
rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f",
"def createConstraint(*argv):",
"def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp",
"def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains"
] | [
"0.60946095",
"0.5763195",
"0.5703657",
"0.5553354",
"0.5487165",
"0.5435914",
"0.53954494",
"0.51878697",
"0.51259",
"0.51116204",
"0.5077191",
"0.5054225",
"0.4999024",
"0.49555075",
"0.49334964",
"0.4926704",
"0.4873679",
"0.48712805",
"0.48634586",
"0.48509604",
"0.48481923",
"0.48029104",
"0.47769222",
"0.47725466",
"0.47688386",
"0.47648177",
"0.47571298",
"0.47539604",
"0.47416186",
"0.47258055"
] | 0.66719025 | 0 |
Duplicate a joint chain. | def duplicate_chain(chain, search='', replace='', suffix=''):
if suffix:
suffix = '_'+suffix
new_jnts = []
for joint in chain:
new_name = joint.replace(search, replace, 1)+suffix
new_jnt = mc.duplicate(joint, po=1, n=new_name)[0]
if new_jnts:
mc.parent(new_jnt, new_jnts[-1])
new_jnts.append(new_jnt)
return new_jnts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def skeleton_buildDuplicateChain(self,sourceJoints = None, modifier = 'rig', connectToModule = False, connectAs = 'rigJoints', connectToSource = None, singleMode = False, cgmType = None, indices = [],blockNames=False):\n _str_func = 'skeleton_buildDuplicateChain'\n \n \n if indices:\n log.debug(\"|{0}| >> Indices arg: {1}\".format(_str_func, indices)) \n l_buffer = []\n for i in indices:\n l_buffer.append(sourceJoints[i])\n sourceJoints = l_buffer \n \n ml_source = cgmMeta.validateObjListArg(sourceJoints,mayaType=['joint'],noneValid=False)\n \n if connectToModule:\n #mRigNull = self.moduleTarget.rigNull\n \n #Get our segment joints\n if singleMode:\n l_jointsExist = connectToModule.getMessage(connectAs)\n else:\n l_jointsExist = connectToModule.msgList_get(connectAs,asMeta = False, cull = True)\n \n if l_jointsExist:\n log.debug(\"|{0}| >> Deleting existing {1} chain\".format(_str_func, modifier)) \n mc.delete(l_jointsExist)\n\n l_joints = mc.duplicate([i_jnt.mNode for i_jnt in ml_source],po=True,ic=True,rc=True)\n \n ml_joints = cgmMeta.validateObjListArg(l_joints,'cgmObject',setClass=True)\n \n if blockNames:\n l_names = skeleton_getNameDicts(self,False,len(l_joints)) \n else:\n l_names = []\n \n for i,mJnt in enumerate(ml_joints):\n if blockNames:\n _d_tmp = l_names[i]\n log.debug(\"|{0}| >> blockName dict {1} | {2}\".format(_str_func, i,_d_tmp)) \n for a in ['cgmIterator','cgmName']:\n if _d_tmp.get(a):\n mJnt.addAttr(a, str(_d_tmp.get(a)),attrType='string',lock=True)\n\n if modifier is not None:\n #l_names[i]['cgmTypeModifier'] = modifier\n mJnt.addAttr('cgmTypeModifier', modifier,attrType='string',lock=True)\n \n if cgmType is False:\n ATTR.delete(mJnt.mNode,'cgmType')\n elif cgmType:\n mJnt.addAttr('cgmType', cgmType,attrType='string',lock=True)\n \n #l_joints[i] = mJnt.mNode\n if connectToSource:\n mJnt.connectChildNode(ml_source[i].mNode,'sourceJoint',\"{0}Joint\".format(connectToSource))#Connect\n \n if mJnt.hasAttr('scaleJoint'):\n if mJnt.scaleJoint in ml_skinJoints:\n int_index = ml_source.index(mJnt.scaleJoint)\n mJnt.connectChildNode(ml_source[int_index],'scaleJoint','sourceJoint')#Connect\n\n #Name loop\n ml_joints[0].parent = False\n for i,mJnt in enumerate(ml_joints):\n #mJnt.rename(NAMETOOLS.returnCombinedNameFromDict(l_names[i]))\n mJnt.doName()\t\n \n if connectToModule:\n if singleMode:\n connectToModule.connectChildNode(ml_joints[0],connectAs,'rigNull')\n else:\n connectToModule.msgList_connect(connectAs, ml_joints,'rigNull')#connect\t\n log.debug(ml_joints)\n return ml_joints",
"def skeleton_duplicateJoint(self,sourceJoints = None, modifier = 'rig', connectToModule = False, connectAs = 'rigJoints', connectToSource = 'skinJoint', singleMode = False, cgmType = None, indices = [],blockNames=False):\n _str_func = 'skeleton_buildDuplicateChain'\n \n \n if indices:\n log.debug(\"|{0}| >> Indices arg: {1}\".format(_str_func, indices)) \n l_buffer = []\n for i in indices:\n l_buffer.append(sourceJoints[i])\n sourceJoints = l_buffer \n \n ml_source = cgmMeta.validateObjListArg(sourceJoints,mayaType=['joint'],noneValid=False)\n \n if connectToModule:\n #mRigNull = self.moduleTarget.rigNull\n \n #Get our segment joints\n if singleMode:\n l_jointsExist = connectToModule.getMessage(connectAs)\n else:\n l_jointsExist = connectToModule.msgList_get(connectAs,asMeta = False, cull = True)\n \n if l_jointsExist:\n log.debug(\"|{0}| >> Deleting existing {1} chain\".format(_str_func, modifier)) \n mc.delete(l_jointsExist)\n\n l_joints = mc.duplicate([i_jnt.mNode for i_jnt in ml_source],po=True,ic=True,rc=True)\n \n ml_joints = [cgmMeta.cgmObject(j) for j in l_joints]\n\n if blockNames:\n l_names = skeleton_getNameDicts(self,False,len(l_joints)) \n else:\n l_names = []\n \n for i,mJnt in enumerate(ml_joints):\n if blockNames:\n _d_tmp = l_names[i]\n log.debug(\"|{0}| >> blockName dict {1} | {2}\".format(_str_func, i,_d_tmp)) \n for a in ['cgmIterator','cgmName']:\n if _d_tmp.get(a):\n mJnt.addAttr(a, str(_d_tmp.get(a)),attrType='string',lock=True)\n\n if modifier is not None:\n #l_names[i]['cgmTypeModifier'] = modifier\n mJnt.addAttr('cgmTypeModifier', modifier,attrType='string',lock=True)\n if cgmType is not None:\n #l_names[i]['cgmType'] = cgmType \n mJnt.addAttr('cgmType', cgmType,attrType='string',lock=True)\n \n #l_joints[i] = mJnt.mNode\n if connectToSource:\n mJnt.connectChildNode(ml_joints[i].mNode,connectToSource,'{0}Joint'.format(modifier))#Connect\n \n if mJnt.hasAttr('scaleJoint'):\n if mJnt.scaleJoint in ml_skinJoints:\n int_index = ml_source.index(mJnt.scaleJoint)\n mJnt.connectChildNode(ml_source[int_index],'scaleJoint','sourceJoint')#Connect\n\n #Name loop\n ml_joints[0].parent = False\n for i,mJnt in enumerate(ml_joints):\n #mJnt.rename(NAMETOOLS.returnCombinedNameFromDict(l_names[i]))\n mJnt.doName()\t\n \n if connectToModule:\n if singleMode:\n connectToModule.connectChildNode(ml_joints[0],connectAs,'rigNull')\n else:\n connectToModule.msgList_connect(connectAs, ml_joints,'rigNull')#connect\t\n return ml_joints",
"def clone(self, *args):\n return _osgAnimation.Bone_clone(self, *args)",
"def clone(self):",
"def addChain(self, chain):\n\n\t\tself.chain.append(chain)\n\t\tchain.parentMolecule = self",
"def duplicate(self, to_robot=None):\n _robot = self._related_robot_instance\n self.unlink_from_robot()\n out = deepcopy(self)\n if _robot is not None:\n self.link_with_robot(_robot)\n if to_robot is not None:\n out.link_with_robot(to_robot)\n return out",
"def __copy__(self):\n #new = MCTS(copy=True) # don't run _predict() twice\n new = MCTS(self.env, copy=True) # don't set pi and Q twice\n new.env = self.env.__copy__()\n # can't use __dict__.update() without effecting env __copy__()\n # in theory, you don't need to copy the env. just use one copy for simulating, and restore it to root\n # since _Q() evaluates the env.done() of children, you need self.done = env.done() in __init__()\n # same for env.winner\n new.pi = []\n new. Q = 0\n new.net = self.net\n new.t = self.t\n new.expl = self.expl\n new.children = []\n new.parent = None\n return new",
"def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())",
"def clone(self):\n screen = self.screen\n self._newLine(self._drawing)\n\n Myturtle = self.Myturtle\n self.screen = None\n self.Myturtle = None # too make self deepcopy-able\n\n q = deepcopy(self)\n\n self.screen = screen\n self.Myturtle = Myturtle\n\n q.screen = screen\n q.Myturtle = _TurtleImage(screen, self.Myturtle.shapeIndex)\n\n screen._turtles.append(q)\n ttype = screen._shapes[self.Myturtle.shapeIndex]._type\n if ttype == \"polygon\":\n q.Myturtle._item = screen._createpoly()\n elif ttype == \"image\":\n q.Myturtle._item = screen._createimage(screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n q.Myturtle._item = [screen._createpoly() for item in\n screen._shapes[self.Myturtle.shapeIndex]._data]\n q.currentLineItem = screen._createline()\n q._update()\n return q",
"def clone(self):\n return _libsbml.Association_clone(self)",
"def T_joint_chain(self, joint_name):\n if self.joint_syms[joint_name].get(\"T_joint\") is None:\n # go up the parent chain of transformations\n parent_joint_name = self.global_syms[\"Jname2parentJname\"].get(\n joint_name)\n if parent_joint_name is None:\n self.joint_syms[joint_name][\"T_joint\"] = \\\n self.joint_syms[joint_name][\"Tlocal_joint\"]\n else:\n self.joint_syms[joint_name][\"T_joint\"] = (\n self.T_joint_chain(parent_joint_name)\n * self.joint_syms[joint_name][\"Tlocal_joint\"]\n )\n return self.joint_syms[joint_name][\"T_joint\"]",
"def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()",
"def copy(self, newname=None):\n\n if not newname: newname = self.name + \"_copy\"\n newmol=Protein(name=newname, parent=self.parent,\n elementType=self.elementType, childrenName=self.childrenName,\n setClass=self.setClass, childrenSetClass=self.childrenSetClass,\n top=self.top)\n newmol.curChain=Chain()\n newmol.curRes=Residue()\n newmol.allAtoms= AtomSet()\n newmol.parser = self.parser\n for at in self.allAtoms:\n self._fit_atom_into_tree(newmol, at)\n newmol.buildBondsByDistance()\n return newmol",
"def __deepcopy__(self, memo):\n chain = Chain(model_id = self.model_id,\n chain_id = self.chain_id)\n for fragment in self.fragment_list:\n chain.add_fragment(copy.deepcopy(fragment, memo), True)\n return chain",
"def deep_copy(self):\n return self.__class__(self.inputs, self.outputs, self.middle)",
"def op_dup(self, args):\n self.require_stack(1)\n self.stack.append(self.stack[-1])",
"def duplicate(self):\n\t\treturn Graph(self.vertices[:], self.edges[:])",
"def trip_chain(self):\n pass",
"def clone(self):\n return self.__class__(self.name, *self)",
"def duplicate(*args, inputConnections: bool=True, instanceLeaf: bool=True, name: AnyStr=\"\",\n parentOnly: bool=True, renameChildren: bool=True, returnRootsOnly: bool=True,\n smartTransform: bool=True, transformsOnly: bool=True, upstreamNodes: bool=True,\n **kwargs)->List[AnyStr]:\n pass",
"def clone(self):\n newlist = []\n for a in self.actors:\n newlist.append(a.clone())\n return Assembly(newlist)",
"def clone(self):\n return _libsbml.XMLTriple_clone(self)",
"def clone(self):\n return _libsbml.GeneProductAssociation_clone(self)",
"def copy(self):\n model_copy = BayesianModel()\n model_copy.add_nodes_from(self.nodes())\n model_copy.add_edges_from(self.edges())\n if self.cpds:\n model_copy.add_cpds(*[cpd.copy() for cpd in self.cpds])\n return model_copy",
"def clone(self, replica=None):\n\n\t\tif replica == None:\n\t\t\treplica = Molecule()\n\n\t\treplica.copy(self)\n\n\t\tfor chain in self.chain:\n\t\t\tnewchain = chain.clone()\n\t\t\treplica.addChain(newchain)\n\n\t\treturn replica",
"def copy(self):\n new_chain = []\n for block in self.chain:\n if block.index == 0:\n new_chain.append(self.create_genesis())\n else:\n new_block = Block()\n new_block.deserialize(block.serialize())\n new_chain.append(new_block)\n\n return BlockChain(new_chain)",
"def test_deepcopy(self):\n t = Compose([Enumerate([2, \"asfa\", \"ipsi\"]), OneHotEncode(3)], \"categorical\")\n t.transform([2])\n copy.deepcopy(t)",
"def W_joint_chain(self, joint_name):\n if self.joint_syms[joint_name].get(\"W\") is None:\n # go up the parent chain of transformations\n parent_joint_name = self.global_syms[\"Jname2parentJname\"].get(\n joint_name)\n if parent_joint_name is None:\n self.joint_syms[joint_name][\"W\"] = \\\n self.joint_syms[joint_name][\"q_rpy\"]\n else:\n self.joint_syms[joint_name][\"W\"] = (\n self.W_joint_chain(parent_joint_name)\n + self.joint_syms[joint_name][\"q_rpy\"]\n )\n return self.joint_syms[joint_name][\"W\"]",
"def polyDuplicateAndConnect(*args, removeOriginalFromShaders: bool=True, renameChildren:\n bool=True, **kwargs)->None:\n pass",
"def clone(self):\n raise NotImplementedError"
] | [
"0.63030636",
"0.6269034",
"0.60625374",
"0.58090156",
"0.5777003",
"0.57204974",
"0.56585926",
"0.55766195",
"0.5560866",
"0.55120313",
"0.5510162",
"0.54400694",
"0.5433781",
"0.54283494",
"0.5389344",
"0.5383912",
"0.53801215",
"0.53679395",
"0.5365678",
"0.5365056",
"0.53536135",
"0.53531176",
"0.5330423",
"0.5328828",
"0.5323335",
"0.5277563",
"0.52724487",
"0.52718264",
"0.52692497",
"0.5265826"
] | 0.6813391 | 0 |
This function loops through directory and updates dates of files in said directory. | def update_date(dest=dest):
for root, _, files in os.walk(dest):
ignore = ["README.md","SUMMARY.md"]
_ = [edit_files(root + "/" + file) for file in files if (file not in ignore and file.endswith(".md"))] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)",
"def upload_all_workout_from_directory(directory_path):\n day = datetime.date.today() - datetime.timedelta(days=datetime.date.today().weekday(), weeks=1)\n for root, dirs, files in os.walk(directory_path):\n for f in files:\n print(f)\n upload_workout_from_directory(os.path.relpath(os.path.join(root, f), \".\"), get_next_monday(day))\n day = get_next_monday(day)",
"def run(self):\n super().run()\n date_subdirs = sorted(self.list_directory(self.input_location,\n self.input_location_type))\n for date_subdir in date_subdirs:\n if not re.search(\"^([\\d]{4}-[\\d]{2}-[\\d]{2})\", date_subdir):\n print(\"{}: Directory name {} not in YYYY-MM-DD format\"\\\n .format(self.name, date_subdir))\n continue\n date_path = os.path.join(self.input_location, date_subdir, \"RAW\")\n if len(self.list_directory(date_path, self.input_location_type)) == 0:\n continue\n processed_ok = self.process_single_date(date_path)\n if not processed_ok:\n continue",
"def iterate_dir(dir_path:str, files, equipt_nr):\n for ii in os.listdir(dir_path):\n if os.path.isdir(ii):\n iterate_dir(ii)\n elif re.search('[0-9]{7}', ii):\n rename_file(ii, equipt_nr)\n else:\n print('not editing : ' + ii)",
"def update(self):\n if os.path.isdir(self.full_path):\n self.file_list = os.listdir(self.full_path)\n else:\n self.file_list = []",
"def getFilesToProcess(directoryPath, nextDateToProcess) :\n fileNames = []\n for (dirpath, dirnames, filenames) in os.walk(directoryPath) :\n fileNames.extend(filenames)\n break\n\n nextDate = datetime.datetime.strptime(nextDateToProcess, \"%Y-%m-%d\")\n filesToProcess = []\n for fileName in fileNames :\n tokens = fileName.split('_')\n lastToken = tokens[len(tokens) - 1]\n tokens = lastToken.split('.')\n dateTimeString = tokens[0]\n dateTimeObj = datetime.datetime.strptime(dateTimeString, \"%Y-%m-%d-%H-%M\")\n if dateTimeObj.date() == nextDate.date() :\n filesToProcess.append(fileName)\n\n return filesToProcess",
"def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")",
"def count_files_loop(self, dirpath):\n for i in os.listdir(dirpath):\n if i[0] == '.':\n continue\n elif os.path.isdir(dirpath + i):\n self.count_files_loop(dirpath + i + '/')\n elif os.path.isfile(dirpath + i):\n self.file_count += 1\n else:\n print dirpath + i, 'does not exist'\n return",
"def update_source_files(source_directory_list, source_extension_list):\n # get source files in the directory list\n source_total = 0\n for unused, source_directory in enumerate(source_directory_list):\n source_files_list = []\n get_requested_files(source_directory, source_extension_list, source_files_list)\n # update the files with shared object references\n for unused, source_file in enumerate(source_files_list):\n updated_file = []\n file_changed = modify_input_file(source_file, updated_file)\n if file_changed:\n filepath = get_printble_filepath(source_file)\n print(filepath)\n source_total += 1\n if __file_update:\n write_output_file(updated_file, source_file)\n print(\"Total Files\", source_total)\n print()",
"def parse_dir(self, directory):\n for dir in os.listdir(directory):\n if dir in ['.git', '.github', '.vscode', 'docs']:\n continue\n next_dir = os.path.join(directory, dir)\n if os.path.isdir(next_dir):\n if dir.startswith('template_'):\n self.parse_template(next_dir)\n else:\n normpath = os.path.relpath(next_dir)\n normpath = os.path.normpath(normpath)\n path = normpath.split(os.sep)\n self.add_folder(path)\n # add_directory(next_dir)\n self.parse_dir(next_dir)",
"def process_dir(self, src_dir, dst_dir):\n self.logger.tree(src_dir)\n for srcpath in self.list_all_files(src_dir):\n dstpath = srcpath.replace(src_dir, dst_dir)\n # TODO: Can we clean up the way we handle relative_path?\n # Relative path is here so that when we print files in the log it\n # shows only the file's path. Should we just pass it to the logger\n # when we create it? Or let the logger figure it out?\n # relative_path = srcpath.replace(src_dir + '/', '')\n self.cur_file = File(srcpath, dstpath, self.logger)\n self.process_file(self.cur_file)",
"def dolibupdate(root, subdir):\n\n global fileCount, grooveCount, gdDate, grooveDB, processedFiles, mkGrooveList\n\n db = grooveDB[0][1]\n\n if subdir == '.':\n print \"Skipping: '.'\"\n return\n\n if subdir:\n print \" Processing library directory '%s'.\" % subdir\n\n\n \"\"\" Get a list of the files in this directory. If the list\n includes a file called 'MMAIGNORE' the entire directory\n (and subdirs) is ignored. Otherwise, each file in the\n directory ending in 'mma' is parsed for groove defs.\n \"\"\"\n\n p = os.path.join(root,subdir)\n dirfiles = os.listdir(p)\n\n if \"MMAIGNORE\" in dirfiles:\n print \"Skipping: %s\" % p\n return\n\n for fn in sorted(dirfiles):\n\n # Ignore hidden files and emacs auto-save and dead.\n\n if fn.startswith('.') or fn.startswith('#'):\n continue\n\n f=os.path.join(root, subdir, fn) # Create full path name\n\n if os.path.isdir(f):\n dolibupdate(root, os.path.join(subdir,fn)) # recursive!\n\n elif f.endswith(gbl.ext):\n ename = os.path.join(subdir, fn)\n\n processedFiles.append(ename)\n \n if gdDate and ename in db and os.path.getmtime(f) < gdDate:\n print \" Existing: %s\" % f\n grooveCount += len(db[ename])\n continue\n\n if ename in db:\n print \" Updating: %s\" % f\n else:\n print \" Creating: %s\" % f\n mkGrooveList = []\n MMA.grooves.grooveClear([])\n gbl.mtrks = {}\n MMA.swing.mode = 0\n for c in gbl.midiAssigns.keys():\n gbl.midiAssigns[c]=[]\n for a,v in enumerate(gbl.midiAvail):\n gbl.midiAvail[a]=0\n gbl.mtrks[0]=MMA.midi.Mtrk(0)\n\n gbl.tnames = {}\n\n MMA.parse.parseFile(f) # read current file, grab grooves\n\n fileCount += 1 # just so we can report to user\n grooveCount += len(mkGrooveList)\n db[ename]=mkGrooveList\n\n else:\n if not f.endswith(mmadir):\n print \" Ignoring: %s\" % f",
"def add_timestamps(dir_video):\n print(\"Adding creation dates to file names\")\n os.chdir(dir_video)\n # get only top level dir info\n dir_data_video_files = next(os.walk(dir_video))\n list_video_files = dir_data_video_files[2] # get file list\n for f_name in list_video_files:\n if GOPRO_PATTERN.search(f_name):\n f_time = time.strftime(r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(f_name)))\n os.rename(f_name, f\"{f_time}_{f_name}\")",
"def updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))",
"def updateCache(self):\n for root, dirs, files in os.walk(cachedFilesPath):\n for file in files:\n if file.endswith(cachedFileExtensionSuffix):\n path = os.getcwd()+'/'+cachedFilesPath+file\n with open(path, mode='r') as f:\n payload_json = f.read()\n payload_obj=jsonpickle.decode(payload_json)\n r= self.upload(payload_obj)\n if isinstance(r, types.NoneType):\n #do nothing\n print(\"\")\n else:\n if r.status_code == 200 :\n #uploaded!\n if cacheArhive:\n #move it to archive\n dst=os.getcwd()+'/'+cachedArchivePath+file\n shutil.move(path, dst)\n print(\"archived log: \", file)\n else:\n #delete it\n os.remove(path)",
"def do_merge_all():\n for rawd, merged in TOMERGE:\n mylogger.info(\"cleaning \" + merged)\n ensure_dir(merged)\n cleandir(merged)\n mylogger.info(\"merging \" + rawd + \" to \" + merged)\n build_merged_dir(build_sensor_file_map(rawd), merged)\n\n # add timestamp file\n\tf = open(TIMESTAMP_FILE,\"w\")\n\tf.write(str(datetime.datetime.now()))\n\tf.close()",
"def scan_dir(self, dir):\n import pathlib\n import magic\n\n for filename in find_all_files(dir):\n self.filelist.append({\n \"filename\": filename,\n \"mime\": magic.from_file(filename, mime=True),\n \"size_bytes\": os.path.getsize(filename),\n \"ext\": pathlib.Path(filename).suffix\n })",
"def update_reports():\n return os.listdir('./reports')",
"def _update_subfiles(self) -> None:\n\t\t# Clear list of subfiles\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tfor file in node.get_subfiles():\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tfor file in subnode.filenames:\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))",
"def calibrate_directory(self, directory, custom_file, out_dir, overwrite_rad, overwrite_ref):\n self.total_files = sum([len(files) for r, d, files in os.walk(directory)])\n self.current_file = 1\n try:\n for file_name in os.listdir(directory):\n # check each file in directory (file or subdirectory?)\n full_path = os.path.join(directory, file_name)\n if os.path.isdir(full_path) and full_path is not out_dir:\n # recursive call for each subdirectory\n self.calibrate_directory(os.path.join(directory, file_name), custom_file, out_dir,\n overwrite_rad, overwrite_ref)\n else:\n # calibrate each file individually\n self.calibrate_file(full_path, custom_file, out_dir, overwrite_rad, overwrite_ref)\n self.current_file += 1\n self.update_progress()\n except FileNotFoundError:\n print(directory + \": directory does not exist.\")\n with open(self.logfile, 'a+') as log:\n log.write(directory + ': relative reflectance input - directory does not exist \\n')\n if self.main_app is not None:\n raise InputFileNotFoundException(directory)\n self.update_progress(100)",
"def save(self,\n directory,\n dir_pattern=None,\n file_pattern=\"{accession_number}\",\n download_all=False,\n daily_date_format=\"%Y%m%d\"):\n for (year, quarter, f) in self.quarterly_date_list:\n self.quarterly.year = year\n self.quarterly.quarter = quarter\n self.quarterly.entry_filter = lambda x: f(x) and self.entry_filter(x)\n self.quarterly.save(directory=directory,\n dir_pattern=dir_pattern,\n file_pattern=file_pattern,\n download_all=download_all)\n\n for d in self.daily_date_list:\n self.daily.date = d\n try:\n self.daily.save(directory=directory,\n dir_pattern=dir_pattern,\n file_pattern=file_pattern,\n download_all=download_all,\n date_format=daily_date_format)\n except (EDGARQueryError, NoFilingsError):\n pass",
"def recursively_rename_files():\n ordered_equipts = get_directory_definition()\n\n # Iterates each equipement folder\n for ii in ordered_equipts:\n iterate_dir(ii, ordered_equipts.index(ii))",
"def update_directory(dir):\n print('Updating {dir}...'.format(dir=dir))\n os.chdir(dir)\n status = 1\n tries = 0\n # Keep trying to git pull until success, or until tries = 30\n while status != 0:\n status = os.system('sudo git pull')\n tries += 1\n if status != 0:\n print('Trying again. Attempt {0} out of 30.'.format(tries))\n if tries == 30:\n break\n if status == 0:\n print('Succeeded.')\n else:\n print('Failed after 30 tries.')",
"def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)",
"def preprocess_dir(self, manifest_dir, content_property, kwargs=None):\n # Start the timer\n start = time.time()\n # Walk the directory and preprocess each file\n all_files = [\n os.path.join(r, file)\n for r, d, f in os.walk(manifest_dir)\n for file in f\n if file.endswith(\".json\") and not file.startswith(\"._\")\n ]\n for file in all_files:\n file = file.replace(\"\\\\\", \"/\") # Handle Windows paths\n tmp = file.split(\"/\")\n path = \"/\".join(tmp[:-1])\n filename = tmp[-1]\n self.preprocess(path, filename, content_property, kwargs=None)\n # Print time to completion\n end = time.time()\n t = end - start\n print(\"Processed all files in \" + str(t) + \" seconds.\")",
"def readDirectory():\n tagdir = \"tagreplacements\"\n data = os.listdir(tagdir)\n for d in data:\n processFile(os.path.join(tagdir,d))\n \n #print(repd)",
"def find_identical_files(directory):\n # go to the directory\n os.chdir(directory)\n \n # the problem wiht the md5 in our scan is that it causes the access time to be\n # updated. This renders future scans of the directory when looking for old files\n # to see them no older than the last scan. An approach to get around this would\n # be to retrieve the access times for all the files using the stat command\n # then use touch reset the access time to the original. This may change other\n # time stats too need to look in that. Here is a command set example for\n # changing the access times using touch:\n\n # addressing access times\n \n # 1 - fetch all the previous accesstimes\n try:\n find_stat = subprocess.Popen(\"find * -exec stat '{}' \\;\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n # get the standard output\n out, err = find_stat.communicate() # get the standard output\n fstats = out.decode().split(\"\\n\") # split the text into a list\n fdates = {}\n for s in fstats:\n # parse stat output lines appear as follows:\n #16777220 1001760 -rw-r--r-- 1 todd staff 0 7 \"Jan 25 22:07:00 2015\" \"Jan 25 22:00:07 2015\" \"Jan 25 22:09:51 2015\" \"Jan 25 22:00:07 2015\" 4096 8 0 bar.txt\n if s == \"\":\n continue\n at = re.search(\"\\\"[^\\\"]+\\\"\",s).group(0)\n at = at.strip('\"')\n dspec = file_date_to_spec(at)\n #ss = s.split(\" \")\n ss = re.split(\"\\s+\",s)\n fn = \" \".join(ss[27:])\n fdates[fn] = dspec\n \n\n # get the md5 sums for each file...the side effect is the access time changes...but we repair these \n file_by_md5 = {}\n for fn in fdates.keys():\n \n # run md5 sum and get the value in a dict\n try:\n cmd_md5 = subprocess.Popen(\"md5 \"+fn,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n out, err = cmd_md5.communicate() # get the standard output\n md5 = out.decode() # split the text into a list\n md5 = md5.rstrip()\n if md5 == '':\n continue\n p = re.split(\"\\) = \",md5)\n if len(p) < 2:\n print(\"Failed to split \"+f)\n fnn = re.sub(\"MD5 \\(\",\"\",p[0])\n if fnn != fn:\n print(\"The file returned by md5 was not was not what was expected: \"+fnn)\n print(\"Expected: \"+fn)\n if file_by_md5.__contains__(p[1]):\n file_by_md5[p[1]] += [ fn ]\n else:\n file_by_md5[p[1]] = [ fn ]\n \n # repair access time using touch command e.g.:\n # /usr/bin/touch -a -t 201501252207.30 bar.txt\n tch = \"/usr/bin/touch -a -t \"+fdates[fn]+\" \"+fn\n return_signal = subprocess.call(tch.split())\n if return_signal != 0:\n print(\"Could not run command \"+tch)\n sys.exit()\n \n # create our dict of list of files keyed by md5 sums\n identical = {}\n for md5 in file_by_md5.keys():\n if len(file_by_md5[md5]) == 1:\n continue\n identical[md5] = file_by_md5[md5]\n \n # go back to our starting directory \n os.chdir(iwd)\n \n return(identical)",
"def changeDate(names, date, ctlFunc = lambda s, d: True): \n\n # parse date\n try:\n day, month, year = re.fullmatch(\"(\\d\\d)(\\d\\d)(\\d\\d\\d\\d)\", date).groups()\n except AttributeError as e:\n raise\n \n # convert strings to ints\n day = int(day)\n month = int(month)\n year = int(year)\n \n for name in names:\n\n if ctlFunc(name, \"*DATE*\"):\n\n # get HH MM SS from file\n p_timestamp = os.path.getmtime(name)\n mdt = datetime.datetime.fromtimestamp(p_timestamp)\n \n # construct new datetime object with file time and provided date\n mdt = datetime.datetime(year, month, day, mdt.hour, mdt.minute, mdt.second)\n\n # change to new file timestamp by passing in datetime.timestamp() \n os.utime(name, (mdt.timestamp(), mdt.timestamp()))",
"def count_files_md5hash_indir(self, dir_path):\n for file_name in os.listdir(dir_path):\n file_path = \"{}/{}\".format(dir_path, file_name)\n self.md5hash.add(count_md5hash_file(file_path))",
"def updateBaseFiles(self):\n for filename, filetype in self._get_base_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename) \n elif filetype is 'Properties':\n lines, write_out = self._update_properties_file(lines,filename)\n else:\n raise TypeError, \"Unknown base file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)"
] | [
"0.7497474",
"0.65693516",
"0.6360544",
"0.6272613",
"0.62687546",
"0.6217366",
"0.58838624",
"0.58187634",
"0.5807831",
"0.57809776",
"0.57797885",
"0.57654995",
"0.5759113",
"0.57328516",
"0.570991",
"0.56900525",
"0.56631005",
"0.56558955",
"0.5641615",
"0.5629182",
"0.5624835",
"0.5609571",
"0.55798995",
"0.5569067",
"0.55629313",
"0.5555831",
"0.55514437",
"0.55399776",
"0.55278116",
"0.5514329"
] | 0.7363431 | 1 |
Test combining each center's file errors | def test__combine_center_file_errors(syn):
expected_error = (
f"\t{ENT1.name} ({ENT1.id}):\n\nmy errors\nn\n\n"
f"\t{ENT1.name} ({ENT1.id}):\n\nerrors here\nf\n\n"
)
calls = [
mock.call("syn1234", downloadFile=False),
mock.call("syn2345", downloadFile=False),
]
with patch.object(syn, "get", return_value=ENT1) as patch_synget:
center_errors = write_invalid_reasons._combine_center_file_errors(
syn, CENTER_ERRORSDF
)
assert center_errors == expected_error
patch_synget.assert_has_calls(calls) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_single_error_merge(self):\n test_folder = base_path +'/test_data/merging_tests/error_test/'\n output_file = os.path.join(test_folder, \"output1.jpg\")\n\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"dummy.txt\", test_folder+\"background.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render_small.png\", test_folder+\"background.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"dummy.txt\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"background_small.jpg\", output_file))\n self.assertRaises(mi.ImageError, lambda: mi.add_background(test_folder+\"render1.png\", test_folder+\"background_large.jpg\", output_file))",
"def test_get_center_invalid_errors(syn):\n with patch.object(\n syn, \"tableQuery\", return_value=QueryResponse\n ) as patch_query, patch.object(\n write_invalid_reasons, \"_combine_center_file_errors\", return_value=\"errors\"\n ) as patch_combine:\n center_invalid = write_invalid_reasons.get_center_invalid_errors(syn, \"syn3333\")\n assert center_invalid == {\"SAGE\": \"errors\", \"TEST\": \"errors\"}\n patch_query.assert_called_once_with(\"SELECT * FROM syn3333\")\n assert patch_combine.call_count == 2",
"def test_strain_not_in_two_files(generate_no_strain_one_file):\n fname = generate_no_strain_one_file\n with pytest.raises(Exception):\n process_files([fname, fname])",
"def generate_second_list_corrupted_files(directory):\n \n paths = [\"test\", \"dev\", \"train\"]\n corrupted_files = []\n\n for path in paths:\n files = [\n f\n for f in listdir(join(directory, path))\n if isfile(join(directory, path, f))\n ]\n\n total_files=len(files)\n processed_files = 0\n \n for file in files:\n processed_files+=1\n if \".wav\" in file: \n print(\"Checking files from \" + path + \" set \" + str(processed_files) + \"/\" + str(total_files), end=\"\\r\")\n if os.path.getsize(join(directory, path, file)) <= 0:\n corrupted_files.append(file)\n continue\n data, _ = soundfile.read(join(directory, path, file))\n if len(data) <= 0:\n corrupted_files.append(file)\n\n print()\n print(\"Done checking \" + path + \" set\")\n print(\"=====================\")\n\n with open('tuda_corrupted2.txt', 'w') as f:\n for file in corrupted_files:\n f.write(\"%s\\n\" % file)\n \n print(\"Done writing tuda_corrupted2.txt\" +\n \"Together with tuda_corrupted.txt they contain all corrupted files in Tuda-De\")\n print(\"=====================\")",
"def verify_images(root_dir, root_listdir):\n counter = 0\n\n for index, image_dir in enumerate(root_listdir):\n images_listdir = os.listdir(root_dir + \"/\" + image_dir)\n list_of_images_indices = [\n image_index\n for image_index in range(3, len(images_listdir) - 1)\n if image_index % 2 == 0\n ]\n for image_ind in list_of_images_indices:\n filename = root_dir + \"/\" + image_dir + \"/\" + images_listdir[image_ind]\n try:\n im = Image.open(filename)\n im.verify()\n im.close()\n except (OSError, ValueError):\n counter += 1\n\n print(\"%d files caused error due to OSError and ValueError.\" % counter)",
"def testError(self):\n cmds = \"\"\"chown 0 missingFile\npwd\nexit\n\"\"\"\n\n def _cbCheckResult(res):\n self.assertNotIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d",
"def check_training_result_files(folder, ruleset, quiet, werror):\n\n too_many_errors = False\n result_folder = os.path.join(folder, 'results')\n for system_folder in _get_sub_folders(result_folder):\n for benchmark_folder in _get_sub_folders(system_folder):\n folder_parts = benchmark_folder.split('/')\n benchmark = folder_parts[-1]\n system = folder_parts[-2]\n\n # If it is not a recognized benchmark, skip further checks.\n if benchmark not in _ALLOWED_BENCHMARKS:\n print('Skipping benchmark: {}'.format(benchmark))\n continue\n\n # Find all result files for this benchmark.\n pattern = '{folder}/result_*.txt'.format(folder=benchmark_folder)\n result_files = glob.glob(pattern, recursive=True)\n\n # No result files were found. That is okay, because the organization\n # may not have submitted any results for this benchmark.\n if not result_files:\n print('No Result Files!')\n continue\n\n _print_divider_bar()\n print('System {}'.format(system))\n print('Benchmark {}'.format(benchmark))\n\n # If the organization did submit results for this benchmark, the number\n # of result files must be an exact number.\n if len(result_files) != _EXPECTED_RESULT_FILE_COUNTS[benchmark]:\n print('Expected {} runs, but detected {} runs.'.format(\n _EXPECTED_RESULT_FILE_COUNTS[benchmark],\n len(result_files)))\n\n errors_found = 0\n result_files.sort()\n for result_file in result_files:\n result_basename = os.path.basename(result_file)\n result_name, _ = os.path.splitext(result_basename)\n run = result_name.split('_')[-1]\n\n # For each result file, run the benchmark's compliance checks.\n _print_divider_bar()\n print('Run {}'.format(run))\n config_file = '{ruleset}/common.yaml'.format(\n ruleset=ruleset,\n benchmark=benchmark)\n checker = mlp_compliance.make_checker(\n ruleset=ruleset,\n quiet=quiet,\n werror=werror)\n valid, _, _, _ = mlp_compliance.main(result_file, config_file, checker)\n if not valid:\n errors_found += 1\n if errors_found == 1:\n print('WARNING: One file does not comply.')\n print('WARNING: Allowing this failure under olympic scoring rules.')\n if errors_found > 1:\n too_many_errors = True\n\n _print_divider_bar()\n if too_many_errors:\n raise Exception('Found too many errors in logging, see log above for details.')",
"def assert_filenames(self):\n print(\"Asserting filenames: \", end=\"\")\n error_files = []\n\n for data_dir in data_settings.BLOCK_DATA_DIRS:\n\n filenames = os.listdir(data_dir)\n\n for filename in filenames:\n\n if 'aux.xml' in filename or 'yield':\n\n continue\n\n try:\n\n filename_split = filename.split(\"_\")\n date = filename_split[0]\n _, suffix = filename_split[-1].split(\".\")\n\n assert suffix == 'tif', \"Wrong file suffix\"\n assert len(date) == 8, \"Wrong amount of numbers in date\"\n assert date[0:4] == '2017', \"Year is wrong\"\n assert date[4] == '0', \"No double digit months in dataset\"\n assert date[5] in ['4', '5', '6', '7', '8',\n '9'], \"Month outside dataset range\"\n assert date[6] in ['0', '1', '2',\n '3'], \"Ten-indicator for day is wrong\"\n assert date[7] in ['0', '1', '2', '3', '4', '5',\n '6', '7', '8', '9'], \"Date is not a digit\"\n assert 'ndvi' in filename or 'drone_rgb' in filename or 'drone_ndvi' in filename, \"Proper type is missing\"\n\n if 'sentinel_ndvi' in filename:\n\n assert len(filename) == 26, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n if 'drone_ndvi' in filename:\n\n assert len(filename) == 23, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n if 'drone_rgb' in filename:\n\n assert len(filename) == 22, \"Filename wrong for {} in {}\".format(\n filename, data_dir)\n\n except (AssertionError, ValueError) as ex:\n\n error_files.append(\"{}: {}\".format(\n ex, os.path.join(data_dir, filename)))\n\n if not error_files:\n\n print(\"All generated block datasets named correctly!\")\n\n else:\n\n print(\"There were some problems with the following files\")\n\n for error_file in error_files:\n print(\"\\t{}\".format(error_file))",
"def check_comps(root, comps):\n for key, comp in comps.items():\n\n filename = os.path.join(root, comp['filename'])\n if not os.path.isfile(filename):\n warnings.warn(\n 'The file {0} could not be found'.format(filename))",
"def test_verify_corrupt_archive(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def test_stress_strain_both_files(generate_two_files_both_stress_strain):\n fname = generate_two_files_both_stress_strain\n with pytest.raises(Exception):\n process_files([fname[0],fname[1]])",
"def combine(files, output):\n # read all files\n bxrs = [h5py.File(f,'r') for f in files]\n # some paths we might care about & will copy\n metadata_paths = [\n '3BRecInfo/3BRecVars/MaxVolt',\n '3BRecInfo/3BRecVars/MinVolt',\n '3BRecInfo/3BRecVars/BitDepth',\n '3BRecInfo/3BRecVars/SignalInversion',\n '3BRecInfo/3BRecVars/SamplingRate',\n '3BRecInfo/3BRecVars/ExperimentType',\n '3BRecInfo/3BMeaChip/NRows',\n '3BRecInfo/3BMeaChip/NCols',\n '3BRecInfo/3BMeaChip/Layout',\n '3BRecInfo/3BMeaChip/MeaType',\n '3BRecInfo/3BMeaSystem/FwVersion',\n '3BRecInfo/3BMeaSystem/HwVersion',\n '3BRecInfo/3BMeaSystem/System'\n ]\n\n # count n_frames, n_samples from each file\n # also verify that key metadata matches\n n_frames = bxrs[0]['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples = [bxrs[0]['3BData/Raw'].shape[0]]\n sampling_rate = bxrs[0]['3BRecInfo/3BRecVars/SamplingRate'][0]\n print(\"checking that all brw files have matching metadata\")\n for b in bxrs[1:]:\n for m in metadata_paths:\n try:\n if len(bxrs[0][m])==1:\n assert bxrs[0][m][:] == b[m][:]\n else:\n assert np.all(bxrs[0][m][:] == b[m][:])\n except Exception as E:\n logger.warn(f\"\"\"metadata does not match for {m}:\n found {bxrs[0][m]} and {b[m]}\n \"\"\")\n n_frames += b['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples.append(b[\"3BData/Raw\"].shape[0])\n print(f\"combined duration: {n_frames/sampling_rate/60:.2f} minutes\")\n\n out_bxr = h5py.File(output, \"w\")\n # copy metadata\n bxrs[0].visititems(partial(glia.copy_metadata, copy_to=out_bxr))\n\n # copy data\n out_bxr['3BRecInfo/3BRecVars/NRecFrames'] = [n_frames]\n out_bxr['nSamplesPerRecording'] = n_samples\n tot_samples = sum(n_samples)\n assert np.isclose(tot_samples/n_frames, 4096) #4096 channels\n \n # copy raw data\n raw_dtype = bxrs[0][\"3BData/Raw\"].dtype\n dset = out_bxr.create_dataset(\"3BData/Raw\", (tot_samples,),\n dtype=raw_dtype)\n start_sample = 0\n max_chunk = int(1e8) # <1GiB \n for i, b in enumerate(bxrs):\n print(f\"Copying {files[i]}\")\n end_sample = start_sample+n_samples[i]\n for s in tqdm(range(0,n_samples[i],max_chunk)):\n e = min(s+max_chunk, end_sample)\n dset[start_sample+s:start_sample+e] = b[\"3BData/Raw\"][s:e]\n start_sample = end_sample\n\n # cleanup\n out_bxr.close()\n [b.close() for b in bxrs]",
"def test_errors(in_fastq, references):\n error_checks = [0, 1, 2, 3]\n for error in error_checks:\n for ref in references:\n print ref[\"file\"], error\n run_bowtie(in_fastq, ref[\"file\"], None, error, 1e6)",
"def testFailFiles(self):\n # Cleaning possible files already occupying the available set\n self.dummySubscription.failFiles([])\n\n # First test - Test if initial file (on available set) is inserted in the\n # failed set - no arguments\n\n dummyFile2 = File('/tmp/dummyfile2,8888', 1, 1, 1)\n # Insert dummyFile2 into the available files Set at dummySubscription\n self.dummySubscription.available.addFile(dummyFile2)\n\n S = self.dummySubscription.availableFiles()\n # Fail all files\n self.dummySubscription.failFiles(S)\n\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Second test - Test if target files are inserted at the failed set\n\n dummyFileList = []\n # Populating the dummy List with a random number of files\n for i in range(1, random.randint(100, 1000)):\n lfn = '/store/data/%s/%s/file.root' % (random.randint(1000, 9999),\n random.randint(1000, 9999))\n size = random.randint(1000, 2000)\n events = 1000\n run = random.randint(0, 2000)\n lumi = random.randint(0, 8)\n\n file = File(lfn=lfn, size=size, events=events,\n checksums={\"cksum\": \"1\"})\n file.addRun(Run(run, *[lumi]))\n dummyFileList.append(file)\n # Add the new files\n self.dummySubscription.available.addFile(dummyFileList)\n # and fail them\n self.dummySubscription.failFiles(files=dummyFileList)\n # Check there are no files available - everything should be failed\n assert len(self.dummySubscription.availableFiles()) == 0, \\\n \"failed subscription still has %s files, what's up with that?\" % \\\n len(self.dummySubscription.availableFiles())\n\n # Check if all files were inserted at subscription's failed files Set\n for x in dummyFileList:\n assert x in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Couldn\\'t make file failed %s' % x.dict['lfn']\n\n # Third test - Test if a replicate file is erased from the other Sets,\n # when a file is considered failed\n\n dummyFile3 = File('/tmp/dummyfile3,5555', 1, 1, 1)\n dummyFileList = []\n dummyFileList.append(dummyFile3)\n\n # Inserting dummyFile3 to be used as an argument, into each of the other\n # file sets\n self.dummySubscription.acquired.addFile(dummyFile3)\n self.dummySubscription.available.addFile(dummyFile3)\n self.dummySubscription.completed.addFile(dummyFile3)\n\n # Run the method failFiles\n self.dummySubscription.failFiles(files=dummyFileList)\n\n # Check if dummyFile3 was inserted at the failed Set\n assert dummyFile3 in self.dummySubscription.failed.getFiles(type='set'), \\\n 'Replicated file could\\'nt be inserted at failed Set'\n\n # Check if dummyFile3 was erased from all the other Sets\n assert dummyFile3 not in self.dummySubscription.acquired.getFiles(type='set'), \\\n 'Failed file still present at acquired Set'\n assert dummyFile3 not in self.dummySubscription.completed.getFiles(type='set'), \\\n 'Failed file still present at completed Set'\n assert dummyFile3 not in self.dummySubscription.available.getFiles(type='set'), \\\n 'Failed file still present at available Set'",
"def checkCopiedFiles(self):\n self.missingAiCopies = 0\n self.invalidAiCopies = 0\n self.invalidMapCopies = 0\n self.missingMapCopies = 0\n\n for iFile in self.inputFilesAll:\n if not (os.path.isfile(self.MAPCOPY + iFile + '.msb')):\n self.missingMapCopies += 1\n else:\n with open(self.MAPCOPY + iFile + '.msb', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidMapCopies += 1\n\n if not (iFile == \"m12_00_00_01\"):\n if (self.useDCX):\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd.dcx')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd.dcx', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n else:\n if not (os.path.isfile(self.AICOPY + iFile + '.luabnd')):\n self.missingAiCopies += 1\n else:\n with open(self.AICOPY + iFile + '.luabnd', 'rb') as testFile:\n if (len(testFile.read()) < 10):\n self.invalidAiCopies += 1\n\n if (self.missingAiCopies > 0 or self.invalidAiCopies > 0 or self.missingMapCopies > 0 or self.invalidMapCopies > 0 or self.missingSfxCopies > 0 or self.invalidSfxCopies > 0):\n return False\n else:\n return True",
"def test_duplicate_images_error(self):\n with self.assertRaises(AssertionError):\n disk.merge_datasets(self.input_datasets, self.output_dataset)\n\n # Original dataset shouldn't be modified.\n self.assertEqual(0, len(self.output_dataset.metadata()))",
"def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def check_files(filenames, fix, verboseout, summaryout):\n\tokmsg = \"OK\" if not fix else \"fixed\"\n\tbadmsg = \"non-conforming\"\n\tbad_files = 0\n\tfor fn in filenames:\n\t\tlines = read_file_and_maybe_fix_it(fn, fix)\n\t\tif check_content(fn, lines, verboseout):\n\t\t\tprint(\"{:s}: {}\".format(fn, okmsg), file=summaryout)\n\t\telse:\n\t\t\tbad_files += 1\n\t\t\tmsg = \"{:s}: {}\".format(fn, badmsg)\n\t\t\tprint(msg, file=summaryout)\n\treturn bad_files",
"def _test_align_file_existance(self):\n if len(self._pathcreator.get_read_files()) == 0:\n self._write_err_msg_and_quit(\"Error! No read libraries given!\\n\")\n if len(self._ref_seq_files) == 0:\n self._write_err_msg_and_quit(\n \"Error! No reference sequence files given!\\n\"\n )",
"def test_does_not_validate_invalid_files(self):\n bad_files = (\n 'newstest2019-defr-src-ts.de.sgm',\n 'newstest2019-defr-src-ts.de.xml',\n )\n for bad_file in bad_files:\n bad_path = join(getcwd(), 'testdata', bad_file)\n with self.assertRaises(ValueError):\n _ = valitest.ValidatableTestSet(bad_path)",
"def perform_filecheck():\n\n\t# Open files\n\ttrain = open('train_aae_final', 'r')\n\ttest = open('test_aae_final', 'r')\n\n\n\t# Check number of training and testing samples\n\tprint (\"\")\n\tprint (\"Number of training samples =\", len(train.readlines()))\n\tprint (\"Number of testing samples =\", len(test.readlines()))\n\tprint (\"\")\n\n\ttrain.close()\n\ttest.close()",
"def test_css_top_files_belong(self):\n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n for fle in top:\n self.assertIn(os.path.basename(fle), list_css_top_files())",
"def test_css_bottom_files_belong(self):\n top, std, bottom = heavy_lifting.organize_css_files(self.fake_file_list)\n for fle in bottom:\n self.assertIn(os.path.basename(fle), list_css_bottom_files())",
"def test_stress_not_in_two_files(generate_no_stress_one_file):\n fname = generate_no_stress_one_file\n with pytest.raises(Exception):\n process_files([fname, fname])",
"def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error",
"def allSWCImport_test():\n\n swcFiles = []\n\n for dirPath, dirNames, fileNames in os.walk(\"tests/117.v3dpbd\"):\n\n swcFiles += [os.path.join(dirPath, fileName)\n for fileName in fileNames if fileName.endswith(\".swc\")]\n\n for swcFile in swcFiles:\n\n print(\"Testing the import of {}\".format(swcFile))\n try:\n NeuronMorphology(swcFile)\n\n except Exception as e:\n if swcFile in [\n 'tests/117.v3dpbd/10_117.v3dpbd_ENT_updated.swc',\n \"tests/117.v3dpbd/05_117.v3dpbd_Advantra.swc\",\n \"tests/117.v3dpbd/15_117.v3dpbd_app2new2.swc\",\n \"tests/117.v3dpbd/01_117.v3dpbd_axis_analyzer.swc\",\n \"tests/117.v3dpbd/18_117.v3dpbd_x1439_y1439_z474_app2.swc\",\n \"tests/117.v3dpbd/13_117.v3dpbd_app2new1.swc\",\n \"tests/117.v3dpbd/12_117.v3dpbd_Advantra_updated.swc\",\n \"tests/117.v3dpbd/19_117.v3dpbd_NeuroGPSTree_updated.swc\",\n \"tests/117.v3dpbd/21_117.v3dpbd_tubularity_model_S.v3draw_MST_Tracing_Ws_21_th_200.swc\",\n \"tests/117.v3dpbd/14_117.v3dpbd_app2new3.swc\",\n \"tests/117.v3dpbd/20_117.v3dpbd_tubularity_model_S.v3draw_MST_Tracing_Ws_21_th_170_updated.swc\",\n \"tests/117.v3dpbd/11_117.v3dpbd_NeuronChaser_updated.swc\",\n \"tests/117.v3dpbd/22_117.v3dpbd_Rayshooting.swc\",\n ]:\n print(e)\n assert type(e) is NotImplementedError and \\\n str(e) == \"No Soma Found for {}\".format(swcFile)\n elif swcFile in [\n \"tests/117.v3dpbd/03_117.v3dpbd_NeuroGPSTree.swc\",\n \"tests/117.v3dpbd/08_117.v3dpbd_neutube_updated.swc\",\n \"tests/117.v3dpbd/04_117.v3dpbd_axis_analyzer_updated.swc\",\n \"tests/117.v3dpbd/06_117.v3dpbd_MOST.swc\",\n \"tests/117.v3dpbd/09_117.v3dpbd_neutu_autotrace.swc\",\n \"tests/117.v3dpbd/07_117.v3dpbd_neutube.swc\",\n \"tests/117.v3dpbd/02_117.v3dpbd_MST_Tracing.swc\",\n ]:\n print(e)\n assert type(e) is ValueError and \\\n str(e) == \"Given SWC File {} has more than one trees\".format(swcFile)\n\n elif swcFile in [\n \"tests/117.v3dpbd/16_117.v3dpbd_EnsembleNeuronTracerV2n.swc\",\n \"tests/117.v3dpbd/17_117.v3dpbd_EnsembleNeuronTracerV2s.swc\"\n ]:\n print(e)\n assert type(e) is AssertionError and \\\n str(e) == \"File {} has cyclic connections!\".format(swcFile)\n\n else:\n print(e)\n assert False",
"def test_file_analyzer(self):\r\n file_analyzer = FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 8\")\r\n self.assertEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100, 'char': 4472}, \\\r\n 'HW08_Test_Himanshu.py': {'class': 1, 'function': 3, 'line': 38, 'char': 1861}})\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 0, 'function': 5, 'line': 46, 'char': 1931}})\r\n\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100}}) # testing less fields\r\n\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 10\").files_summary",
"def output_errors(outputs, gold, sick_ids, sick_sentences):\n with open('./working/err.txt', 'w') as out_f:\n out_f.write('pair_ID\\tdiff\\tpred\\tcorr\\tsentence1\\tsentence2\\n')\n errs = []\n for i, line in enumerate(outputs):\n data = line\n corr = gold[i]\n diff = abs(data-corr)\n if diff > 0.75:\n errs.append((sick_ids[i], round(diff, 1), round(data, 1), corr, ' '.join(sick_sentences[i][0]), ' '.join(sick_sentences[i][1])))\n\n errs.sort(key=lambda x:-x[1])\n\n for line in errs:\n out_f.write('{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n'.format(*line))",
"def storefront_check_errors():\n\n\tcurrentView = uidoc.ActiveView\n\tfamTypeDict = GetFamilyTypeDict(\"Fabrication-Error-Symbol\")\n\n\t# Clear existing error notations\n\terrorNotations = list(GetElementsInView(BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, currentView.Id))\n\terrorNotations = FilterElementsByName(doc, errorNotations,[\"Fabrication\",\"Error-Symbol\"], False)\n\tif errorNotations:\n\t\twith rpw.db.Transaction(\"Place Errors\"):\n\t\t\tfor error in errorNotations:\n\t\t\t\tdoc.Delete(error)\n\n\n\tdef PointsAndErrors(mullions_list, errorName, cat_or_ids):\n\t\t\"\"\"adds to lists of points and errors\"\"\"\n\t\terrorsToFlag = []\n\t\tcompList =[]\n\t\tfor m in mullions_list:\n\t\t\tmElem = doc.GetElement(m)\n\t\t\tif m not in compList:\n\t\t\t\tintersectingMulls = FindIntersectingMullions(mElem, cat_or_ids)\n\t\t\t\tif list(intersectingMulls):\n\t\t\t\t\tmullPt = mElem.Location.Point\n\t\t\t\t\terrorsToFlag.append([mullPt, errorName])\n\t\t\t\t\tfor mm in list(intersectingMulls):\n\t\t\t\t\t\tcompList.append(mm.Id)\n\t\treturn errorsToFlag\n\n\tdef MullionClash():\n\n\t\terrorsToFlag = []\n\n\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\tallMullions = GetAllElements(doc, BuiltInCategory.OST_CurtainWallMullions, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\n\t\tallWalls = FilterElementsByName(doc, allWalls, [\"Storefront\",\"Storefront\"], True)\n\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Mullion Intersects\", BuiltInCategory.OST_CurtainWallMullions)\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Panel Intersects\", BuiltInCategory.OST_CurtainWallPanels)\n\t\tif allWalls:\n\t\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Wall Intersects\", allWalls)\n\n\t\treturn errorsToFlag\n\n\tdef PanelClash():\n\n\n\t\terrorsToFlag = []\n\t\t\n\t\tallPanels = GetAllElements(doc, BuiltInCategory.OST_Windows, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallPanels = FilterDemolishedElements(doc, allPanels)\n\n\t\tpanelMinWidth = 0.45\n\t\tpanelMaxWidth = 5.0\n\t\tpanelMaxHeight = 8.14\n\n\t\t### ITERATE OVER PANEL LIST ###\n\t\tfor p in allPanels:\n\t\t\tfamInst = doc.GetElement(p)\n\n\t\t\tpan_height = famInst.Parameter[BuiltInParameter.FAMILY_HEIGHT_PARAM].AsDouble()\n\t\t\tpan_width = famInst.Parameter[BuiltInParameter.FAMILY_WIDTH_PARAM].AsDouble()\n\n\t\t\tif \"empty\" not in famInst.Name.lower():\n\t\t\t\tif pan_width < panelMinWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Small Panel\"])\n\t\t\t\telif pan_width > panelMaxWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Wide Panel\"])\n\t\t\t\telif pan_height > panelMaxHeight:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Tall Panel\"])\n\t\t\telse:\n\t\t\t\tpass\n\t\t\n\t\treturn errorsToFlag\n\n\tdef ECWallClash():\n\n\t\terrorsToFlag = []\n\t\tcolumnsLinesEdgesEC = []\n\t\twallsLinesEdgesEC = []\n\n\n\t\tdocLoaded = RevitLoadECDocument(quiet=True)\n\t\tif docLoaded[0]:\n\t\t\tdocEC = docLoaded[0]\n\t\t\tecTransform = docLoaded[1]\n\n\t\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\t\tselectedLevelInst = doc.GetElement(selectedLevel)\n\t\t\tlevelElevationEC = None \n\t\t\tfor p in selectedLevelInst.Parameters:\n\t\t\t\tif p.Definition.Name == \"Elevation\":\n\t\t\t\t\tlevelElevationEC = 
p.AsDouble()\n\n\t\t\tallWallsEC = GetAllElements(docEC, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall)\n\t\t\tallColumnsEC = GetAllElements(docEC, BuiltInCategory.OST_Columns, Autodesk.Revit.DB.FamilyInstance)\n\t\t\tallColumnsEC += GetAllElements(docEC, BuiltInCategory.OST_StructuralColumns, Autodesk.Revit.DB.FamilyInstance)\n\n\t\t\tselectedWallsEC = FilterElementsByLevel(docEC, allWallsEC, levelElevationEC)\n\t\t\tselectedColumnsEC = FilterElementsByLevel(docEC, allColumnsEC, levelElevationEC)\n\n\t\t\twallsLinesEdgesEC = GetWallEdgeCurves(docEC, selectedWallsEC, ecTransform)\n\t\t\tcolumnsLinesEdgesEC = GetColumnEdgeCurves(docEC, selectedColumnsEC, ecTransform)\n\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\t\tstorefrontWalls = FilterElementsByName(doc, allWalls,[\"Storefront\",\"Storefront\"], False)\n\t\tstorefrontWalls = FilterWallsByKind(doc, storefrontWalls, \"Basic\")\n\n\t\tobstructionEdges = columnsLinesEdgesEC\n\t\tobstructionEdges += wallsLinesEdgesEC\n\n\t\tif obstructionEdges:\n\t\t\tfor sfWallId in storefrontWalls:\n\t\t\t\tsfWall = doc.GetElement(sfWallId)\n\t\t\t\tlocLine = sfWall.Location.Curve\n\t\t\t\tlocLineStart = locLine.GetEndPoint(0)\n\t\t\t\tlocLineEnd = locLine.GetEndPoint(1)\n\n\t\t\t\tfor obstructionLine in obstructionEdges:\n\t\t\t\t\tobstLineElevation = obstructionLine.GetEndPoint(0).Z\n\t\t\t\t\tlocLineStart = XYZ(locLineStart.X, locLineStart.Y, obstLineElevation)\n\t\t\t\t\tlocLineEnd = XYZ(locLineEnd.X, locLineEnd.Y, obstLineElevation)\n\t\t\t\t\tlocLineFlat = Line.CreateBound(locLineStart, locLineEnd)\n\t\t\t\t\tintersection = RevitCurveCurveIntersection(locLineFlat,obstructionLine)\n\n\t\t\t\t\tif intersection:\n\t\t\t\t\t\t#ERROR: Hit Existing Condition\n\t\t\t\t\t\terrorsToFlag.append([intersection, \"Hit EC\"])\n\n\t\treturn errorsToFlag\n\n\tallErrors = []\n\tallErrors += ECWallClash()\n\tallErrors += MullionClash()\n\tallErrors += PanelClash()\n\n\terrorSymbolId = famTypeDict[\"Fabrication-Error-Symbol\"]\n\n\tif allErrors:\n\t\twith rpw.db.Transaction(\"Error Check\"):\n\t\t\tRevitPlaceErrorsInView(currentView, allErrors, errorSymbolId)",
"def test_check_mapping_file_multiple_problems(self):\r\n\r\n check_mapping_file(mapping_fp=self.errors_warnings_mapping_fp,\r\n output_dir=self.output_dir,\r\n added_demultiplex_field=\"DoesNotExist\",\r\n verbose=False)\r\n\r\n # Check existence of expected output files\r\n output_html_fp = join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.html'))\r\n output_corrected_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt',\r\n '_corrected.txt'))\r\n output_log_fp =\\\r\n join(self.output_dir,\r\n basename(self.errors_warnings_mapping_fp).replace('.txt', '.log'))\r\n overlib_js_fp = join(self.output_dir, 'overlib.js')\r\n\r\n self.assertTrue(exists(output_html_fp))\r\n self.assertTrue(exists(output_corrected_fp))\r\n self.assertTrue(exists(output_log_fp))\r\n self.assertTrue(exists(overlib_js_fp))\r\n\r\n # Check output data for expected results\r\n\r\n html_data = \"\".join([line for line in open(output_html_fp, \"U\")])\r\n corrected_data =\\\r\n \"\".join([line for line in open(output_corrected_fp, \"U\")])\r\n log_data = \"\".join([line for line in open(output_log_fp, \"U\")])\r\n\r\n self.assertEqual(html_data, self.expected_html_errors_warnings_output)\r\n self.assertEqual(corrected_data,\r\n self.expected_corrected_data_errors_warnings)\r\n self.assertEqual(log_data, self.expected_log_errors_warnings_output)"
] | [
"0.6594361",
"0.6371965",
"0.6179928",
"0.617114",
"0.613714",
"0.61048645",
"0.6091394",
"0.6054181",
"0.5993377",
"0.5975022",
"0.5965196",
"0.5943787",
"0.59276956",
"0.5922497",
"0.59199405",
"0.5838658",
"0.58281934",
"0.58267826",
"0.581185",
"0.58048946",
"0.57999486",
"0.5792472",
"0.5785924",
"0.5777578",
"0.5773663",
"0.57687557",
"0.57530075",
"0.57375085",
"0.57300115",
"0.57296735"
] | 0.7143594 | 0 |
Test getting all center invalid errors | def test_get_center_invalid_errors(syn):
with patch.object(
syn, "tableQuery", return_value=QueryResponse
) as patch_query, patch.object(
write_invalid_reasons, "_combine_center_file_errors", return_value="errors"
) as patch_combine:
center_invalid = write_invalid_reasons.get_center_invalid_errors(syn, "syn3333")
assert center_invalid == {"SAGE": "errors", "TEST": "errors"}
patch_query.assert_called_once_with("SELECT * FROM syn3333")
assert patch_combine.call_count == 2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_errors(self) -> None:",
"def storefront_check_errors():\n\n\tcurrentView = uidoc.ActiveView\n\tfamTypeDict = GetFamilyTypeDict(\"Fabrication-Error-Symbol\")\n\n\t# Clear existing error notations\n\terrorNotations = list(GetElementsInView(BuiltInCategory.OST_GenericAnnotation, Autodesk.Revit.DB.FamilyInstance, currentView.Id))\n\terrorNotations = FilterElementsByName(doc, errorNotations,[\"Fabrication\",\"Error-Symbol\"], False)\n\tif errorNotations:\n\t\twith rpw.db.Transaction(\"Place Errors\"):\n\t\t\tfor error in errorNotations:\n\t\t\t\tdoc.Delete(error)\n\n\n\tdef PointsAndErrors(mullions_list, errorName, cat_or_ids):\n\t\t\"\"\"adds to lists of points and errors\"\"\"\n\t\terrorsToFlag = []\n\t\tcompList =[]\n\t\tfor m in mullions_list:\n\t\t\tmElem = doc.GetElement(m)\n\t\t\tif m not in compList:\n\t\t\t\tintersectingMulls = FindIntersectingMullions(mElem, cat_or_ids)\n\t\t\t\tif list(intersectingMulls):\n\t\t\t\t\tmullPt = mElem.Location.Point\n\t\t\t\t\terrorsToFlag.append([mullPt, errorName])\n\t\t\t\t\tfor mm in list(intersectingMulls):\n\t\t\t\t\t\tcompList.append(mm.Id)\n\t\treturn errorsToFlag\n\n\tdef MullionClash():\n\n\t\terrorsToFlag = []\n\n\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\tallMullions = GetAllElements(doc, BuiltInCategory.OST_CurtainWallMullions, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\n\t\tallWalls = FilterElementsByName(doc, allWalls, [\"Storefront\",\"Storefront\"], True)\n\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Mullion Intersects\", BuiltInCategory.OST_CurtainWallMullions)\n\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Panel Intersects\", BuiltInCategory.OST_CurtainWallPanels)\n\t\tif allWalls:\n\t\t\terrorsToFlag += PointsAndErrors(allMullions, \"Mullion-Wall Intersects\", allWalls)\n\n\t\treturn errorsToFlag\n\n\tdef PanelClash():\n\n\n\t\terrorsToFlag = []\n\t\t\n\t\tallPanels = GetAllElements(doc, BuiltInCategory.OST_Windows, Autodesk.Revit.DB.FamilyInstance, currentView=True)\n\t\tallPanels = FilterDemolishedElements(doc, allPanels)\n\n\t\tpanelMinWidth = 0.45\n\t\tpanelMaxWidth = 5.0\n\t\tpanelMaxHeight = 8.14\n\n\t\t### ITERATE OVER PANEL LIST ###\n\t\tfor p in allPanels:\n\t\t\tfamInst = doc.GetElement(p)\n\n\t\t\tpan_height = famInst.Parameter[BuiltInParameter.FAMILY_HEIGHT_PARAM].AsDouble()\n\t\t\tpan_width = famInst.Parameter[BuiltInParameter.FAMILY_WIDTH_PARAM].AsDouble()\n\n\t\t\tif \"empty\" not in famInst.Name.lower():\n\t\t\t\tif pan_width < panelMinWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Small Panel\"])\n\t\t\t\telif pan_width > panelMaxWidth:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Wide Panel\"])\n\t\t\t\telif pan_height > panelMaxHeight:\n\t\t\t\t\terrorsToFlag.append([famInst.GetTransform().Origin, \"Tall Panel\"])\n\t\t\telse:\n\t\t\t\tpass\n\t\t\n\t\treturn errorsToFlag\n\n\tdef ECWallClash():\n\n\t\terrorsToFlag = []\n\t\tcolumnsLinesEdgesEC = []\n\t\twallsLinesEdgesEC = []\n\n\n\t\tdocLoaded = RevitLoadECDocument(quiet=True)\n\t\tif docLoaded[0]:\n\t\t\tdocEC = docLoaded[0]\n\t\t\tecTransform = docLoaded[1]\n\n\t\t\tselectedLevel = __revit__.ActiveUIDocument.ActiveView.GenLevel.Id\n\n\t\t\tselectedLevelInst = doc.GetElement(selectedLevel)\n\t\t\tlevelElevationEC = None \n\t\t\tfor p in selectedLevelInst.Parameters:\n\t\t\t\tif p.Definition.Name == \"Elevation\":\n\t\t\t\t\tlevelElevationEC = 
p.AsDouble()\n\n\t\t\tallWallsEC = GetAllElements(docEC, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall)\n\t\t\tallColumnsEC = GetAllElements(docEC, BuiltInCategory.OST_Columns, Autodesk.Revit.DB.FamilyInstance)\n\t\t\tallColumnsEC += GetAllElements(docEC, BuiltInCategory.OST_StructuralColumns, Autodesk.Revit.DB.FamilyInstance)\n\n\t\t\tselectedWallsEC = FilterElementsByLevel(docEC, allWallsEC, levelElevationEC)\n\t\t\tselectedColumnsEC = FilterElementsByLevel(docEC, allColumnsEC, levelElevationEC)\n\n\t\t\twallsLinesEdgesEC = GetWallEdgeCurves(docEC, selectedWallsEC, ecTransform)\n\t\t\tcolumnsLinesEdgesEC = GetColumnEdgeCurves(docEC, selectedColumnsEC, ecTransform)\n\n\t\tallWalls = GetAllElements(doc, BuiltInCategory.OST_Walls, Autodesk.Revit.DB.Wall, currentView=True)\n\t\tstorefrontWalls = FilterElementsByName(doc, allWalls,[\"Storefront\",\"Storefront\"], False)\n\t\tstorefrontWalls = FilterWallsByKind(doc, storefrontWalls, \"Basic\")\n\n\t\tobstructionEdges = columnsLinesEdgesEC\n\t\tobstructionEdges += wallsLinesEdgesEC\n\n\t\tif obstructionEdges:\n\t\t\tfor sfWallId in storefrontWalls:\n\t\t\t\tsfWall = doc.GetElement(sfWallId)\n\t\t\t\tlocLine = sfWall.Location.Curve\n\t\t\t\tlocLineStart = locLine.GetEndPoint(0)\n\t\t\t\tlocLineEnd = locLine.GetEndPoint(1)\n\n\t\t\t\tfor obstructionLine in obstructionEdges:\n\t\t\t\t\tobstLineElevation = obstructionLine.GetEndPoint(0).Z\n\t\t\t\t\tlocLineStart = XYZ(locLineStart.X, locLineStart.Y, obstLineElevation)\n\t\t\t\t\tlocLineEnd = XYZ(locLineEnd.X, locLineEnd.Y, obstLineElevation)\n\t\t\t\t\tlocLineFlat = Line.CreateBound(locLineStart, locLineEnd)\n\t\t\t\t\tintersection = RevitCurveCurveIntersection(locLineFlat,obstructionLine)\n\n\t\t\t\t\tif intersection:\n\t\t\t\t\t\t#ERROR: Hit Existing Condition\n\t\t\t\t\t\terrorsToFlag.append([intersection, \"Hit EC\"])\n\n\t\treturn errorsToFlag\n\n\tallErrors = []\n\tallErrors += ECWallClash()\n\tallErrors += MullionClash()\n\tallErrors += PanelClash()\n\n\terrorSymbolId = famTypeDict[\"Fabrication-Error-Symbol\"]\n\n\tif allErrors:\n\t\twith rpw.db.Transaction(\"Error Check\"):\n\t\t\tRevitPlaceErrorsInView(currentView, allErrors, errorSymbolId)",
"def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors",
"def getAll(self):\n x,y,a = self.getxya()\t\n xerrs = [self.errors[0][i] for i in range(len(self.x)) if self.x[i]!=None and self.y[i]!=None]\n yerrs = [self.errors[1][i] for i in range(len(self.x)) if self.x[i]!=None and self.y[i]!=None]\t\n return x,y,a,xerrs,yerrs",
"def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)",
"def test_get_error_data_table_all_col_errors(self):\n field_setup = None\n error_names = None\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])",
"def test_get_error_data_all_col_errors(self):\n field_setup = None\n error_names = None\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])",
"def error(self) -> Sequence[float]:\n errors = []\n for line, sign in zip(self.marker_lines, (-1, 1)):\n if self._orientation == Orientation.UP_DOWN:\n picket_pos = self._fit(line.center.y)\n mlc_pos = line.center.x\n else:\n picket_pos = self._fit(line.center.x)\n mlc_pos = line.center.y\n if (\n self._separate_leaves\n ): # offset the picket position by the DLG and nominal gap\n mag_factor = self._image.sid / 1000\n picket_pos += (\n sign * self._nominal_gap_mm * mag_factor / 2 * self._image.dpmm\n )\n errors.append((mlc_pos - picket_pos) / self._image.dpmm)\n return errors",
"def checks(self, error_margin=0.1):\n\n # Check all compartments are positive\n for label in self.labels:\n assert self.compartments[label] >= 0.",
"def test_normalize_with_multiple_errors(self) -> None:\n errors_address = address_with_errors()\n try:\n normalize_an_address(errors_address)\n except ShipEngineError as err:\n assert err.request_id is not None\n assert err.request_id.startswith(\"req_\") is True\n assert err.source is ErrorSource.SHIPENGINE.value\n assert err.error_type is ErrorType.ERROR.value\n assert err.error_code is ErrorCode.INVALID_ADDRESS.value\n assert (\n err.message\n == \"Invalid address.\\nInvalid City, State, or Zip\\nInsufficient or Incorrect Address Data\"\n )",
"def has_errors(self) -> bool:",
"def test_check_cluster1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_cluster(cluster_fail_1)\n assert str(err_info.value) == 'cluster type input not within range of index'",
"def test_kyc_get_validation_legal(self):\n pass",
"def checkErrors(cHat, c):\n x, y, r = c[0], c[1], c[2]\n # Check if any circles are detected when they shouldn't be (false positive)\n if x == '-' or y =='-' or r == '-':\n if not(x == '-' or y =='-' or r == '-'):\n raise NullEntry()\n elif cHat is not None and len(cHat) >= 1:\n raise FalsePositiveDetection()\n elif math.isnan(float(c[0])) or math.isnan(float(c[1])) or math.isnan(float(c[2])):\n raise NaNError()\n # Check if circles weren't detected when they should have been (true negative)\n elif cHat is None:\n raise TrueNegativeDetection()",
"def test_get_xy_invalid_space():\n pass",
"def test_get_error_data_table_some_col_errors(self):\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])\n pass",
"def test__combine_center_file_errors(syn):\n expected_error = (\n f\"\\t{ENT1.name} ({ENT1.id}):\\n\\nmy errors\\nn\\n\\n\"\n f\"\\t{ENT1.name} ({ENT1.id}):\\n\\nerrors here\\nf\\n\\n\"\n )\n calls = [\n mock.call(\"syn1234\", downloadFile=False),\n mock.call(\"syn2345\", downloadFile=False),\n ]\n with patch.object(syn, \"get\", return_value=ENT1) as patch_synget:\n center_errors = write_invalid_reasons._combine_center_file_errors(\n syn, CENTER_ERRORSDF\n )\n assert center_errors == expected_error\n patch_synget.assert_has_calls(calls)",
"def lat_errors(self):\r\n try:\r\n _lat_errors = self._validate_latlon(self.sourceLatCol)\r\n return _lat_errors\r\n except:\r\n return None",
"def test_unfiltered_total_errors_detected(self):\n text_list, timestamps = pf.get_file(\"GenerateSRT.txt\")\n client = pf.initialize_api()\n sentences = pf.print_sentences(text_list)\n final_error_total = 0\n\n for i, token in enumerate(sentences):\n sequence_switched, end_matches, offset_list, err_message, sentence_error_total = \\\n edf.detect_errors(str(sentences[i]), client, True)\n\n final_error_total += sentence_error_total\n\n self.assertEqual(final_error_total, 6)",
"def check_latlon(self):\n\n for station in list(self.station_list.values()):\n station_def = self.station_definitions[station.name]\n lat = float(station.get_obs('LAT')[0])\n lon = float(station.get_obs('LON')[0])\n lat_diff = abs(lat - station_def['lat'])\n lon_diff = abs(lon - station_def['lon'])\n if lat_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lat,\n explanation=\"lats are different for: \" + station.name +\n \". Old value : \" + str(station_def['lat'])\n ))\n if lon_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lon,\n explanation=\"lons are different for: \" + station.name +\n \". Old value : \" + str(station_def['lon'])\n ))",
"def test_get_error_data_some_col_errors(self):\n field_setup = None\n error_names = ['first', 'billing_address_1', 'billing_country_area']\n prepared_info = self.setup_error_data(field_setup, error_names)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])\n pass",
"def check_errors(self, data):\n for entry in data:\n if entry.find('ERROR') != -1:\n return entry\n return False",
"def check_get_errors(self):\n raise NotImplementedError(\"Implement it in a subclass.\")",
"def __verify_root(self):\n output = self.output\n for key in self.data:\n if key != self.root:\n output[\"status\"] = False\n output[\"message\"] = \"{0} is not is the correct format.\"\n print(\"-- An Error Occurred -- {0}\".format(output[\"message\"]))\n break\n return output",
"def test_check_invalid_centering():\n try:\n import pytest\n except:\n poppy._log.warning('Skipping test test_check_invalid_centering because pytest is not installed.')\n return # We can't do this test if we don't have the pytest.raises function.\n\n # MFT setup style and execute\n\n with pytest.raises(ValueError) as excinfo:\n mft = matrixDFT.MatrixFourierTransform(centering='some garbage value', verbose=True)\n assert excinfo.value.message == 'Error: centering method must be one of [SYMMETRIC, ADJUSTIBLE, FFTRECT, FFTSTYLE]'",
"def errors(self) -> List[Error]:",
"def testFailed(self):\r\n failedExprKeys = list(self.__testFailedExpressions.keys())\r\n for i in range(len(failedExprKeys)):\r\n for expr in self.__testFailedExpressions[failedExprKeys[i]]:\r\n self.__Calculator.setExpression(expr)\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__testErrors[failedExprKeys[i]], self.__Calculator.getError())",
"def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol",
"def calc_errors(test_data, loc_by_img):\n one_km_count = 0\n five_km_count = 0\n ten_km_count = 0\n hundred_km_count = 0\n thousand_km_count = 0\n other_count = 0\n for test_img in test_data:\n img_id = test_img['watchlink']\n img_result_loc = loc_by_img[img_id]\n img_actual_loc = Location(float(test_img['latitude']), float(test_img['longitude']))\n error = Location.dist(img_result_loc, img_actual_loc)\n if error < 1:\n one_km_count += 1\n elif error < 5:\n five_km_count += 1\n elif error < 10:\n ten_km_count += 1\n elif error < 100:\n hundred_km_count += 1\n elif error < 1000:\n thousand_km_count += 1\n else:\n other_count += 1\n return [one_km_count, five_km_count, ten_km_count, hundred_km_count, thousand_km_count, other_count]",
"def test_init_errors(self):\n t = self.Test({})\n self.assertEqual(t.errors, {})"
] | [
"0.68639857",
"0.65659684",
"0.6317057",
"0.6280998",
"0.624511",
"0.62418723",
"0.6231955",
"0.6174314",
"0.6160829",
"0.6155387",
"0.6125272",
"0.61134624",
"0.6105969",
"0.60783327",
"0.606288",
"0.60506743",
"0.604379",
"0.6016345",
"0.60074997",
"0.6006396",
"0.6003036",
"0.5999768",
"0.5980903",
"0.5960872",
"0.59525925",
"0.5939286",
"0.59228396",
"0.5922243",
"0.59138477",
"0.59055305"
] | 0.73111194 | 0 |
Returns the highest magnification for the slide | def highest_mag(slide):
return int(slide.properties['aperio.AppMag']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]",
"def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample",
"def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio",
"def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]",
"def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }",
"def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None",
"def get_level_mag(slide, level):\n return level_mags(slide)[level]",
"def get_mag(self):\n raise NotImplementedError",
"def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]",
"def largestResolution(resolutions):\n return resolutions[0]",
"def getNativeMagnification(self):\n return self._nativeMagnification.copy()",
"def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo",
"def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)",
"def maxResolution(self,wave = None):\n\n d = 2000.0*self.height*math.tan(self.angle/2) # Max pathlength in microns.\n dn = self.n.getDerivative(wave) # dn/dy of materail\n return d*dn #",
"def max_scale_image(self):\n maximum = np.argmax(self.transform, 0)\n return self.scale_array[maximum] * (self.support.sum(0) > 0)",
"def sort_maxside(sprite):\n return max(sprite.width, sprite.height)",
"def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]",
"def getImageMax(self):\n fname = '%s::%s'%(self.__class__.__name__, self.getImageMax.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None, None\n maxIndex = c_int(1)\n maxValue = c_float(1)\n ierr = c_int(1)\n self.lib.xcloc_getImageMax(maxIndex, maxValue, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get max value and index of DSM image\"%fname)\n return None, None\n imax = maxIndex.value - 1 # Fortran to C\n vmax = maxValue.value\n return imax, vmax",
"def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density",
"def _maxAlien(self):\n maxA = 0\n for r in self._aliens:\n for y in r:\n if(y != None):\n maxA = max(maxA,y.x)\n return maxA",
"def max(self):\r\n\t\treturn max(self.sample)",
"def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification",
"def MaxSlMsd(self):\r\n\t\treturn self._get_attribute('maxSlMsd')",
"def mag(self) -> complex:\n return self.major_extent",
"def get_min_mag_edge(self):\r\n\t\treturn self.min_mag",
"def peak_height(self):\n return np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth",
"def _get_maxth(self):\n return self.__maxth"
] | [
"0.7143382",
"0.6984062",
"0.6947595",
"0.6700194",
"0.6502702",
"0.6465279",
"0.6439392",
"0.6389368",
"0.62770873",
"0.62758964",
"0.6203424",
"0.6182897",
"0.6060569",
"0.6010728",
"0.6000861",
"0.59334785",
"0.5931892",
"0.59017086",
"0.5889821",
"0.58670044",
"0.584315",
"0.5836564",
"0.5819528",
"0.58193827",
"0.5767667",
"0.57545245",
"0.57338196",
"0.57338196",
"0.57338196",
"0.57338196"
] | 0.8554771 | 0 |
Returns the magnification for each level in a slide | def level_mags(slide):
return [highest_mag(slide)/downsample for downsample in slide.level_downsamples] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_level_mag(slide, level):\n return level_mags(slide)[level]",
"def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio",
"def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None",
"def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification",
"def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]",
"def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])",
"def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample",
"def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }",
"def get_level_size(slide, level):\n return slide.level_dimensions[level]",
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]",
"def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)",
"def get_illustrations(self):\n \n temp=[[\"middle\",[],0,0],[\"middle\",[],0,0],[\"center\",[],0,0]]\n for pic in self.illustrations.all():\n \n if pic.position==\"L\":\n temp[0][1].append(pic)\n temp[0][2]=max(temp[0][2],pic.width)\n temp[0][3]+=pic.height\n elif pic.position==\"R\":\n temp[1][1].append(pic)\n temp[1][2]=max(temp[1][2],pic.width)\n temp[1][3]+=pic.height\n else:\n temp[2][1].append(pic)\n temp[2][2]+=pic.width\n temp[2][3]=max(temp[2][3],pic.height)\n temp[0][3]=max(temp[0][3],temp[1][3])\n temp[1][3]=temp[0][3]\n if len(temp[2][1])>0:\n pos = temp[2][1][0].position\n if pos == \"BR\":\n temp[2][0] = \"right\"\n elif pos == \"BL\":\n temp[2][0] = \"left\"\n for i in range(2):\n if len(temp[i][1])>0:\n pos = temp[i][1][0].position\n if pos in [\"RT\",\"LT\"]:\n temp[i][0] = \"top\"\n elif pos in [\"RB\",\"LB\"]:\n temp[i][0] = \"bottom\"\n self.text_size=(-temp[0][2]-temp[1][2],-temp[2][3])\n print(temp)\n return temp",
"def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages",
"def get_mag(self):\n raise NotImplementedError",
"def _magsamples(self):\n if self._derived_properties[\"magsamples\"] is None:\n if self.lbda is None:\n raise AttributeError(\"lbda not set.\")\n self.derive_magsamples()\n \n return self._derived_properties[\"magsamples\"]",
"def extract_level_from_name(self):\n images = glob.glob(os.path.join(self.frame_dir, '*'))\n level = []\n for i, im in enumerate(images):\n base, tail = os.path.split(im)\n name = tail.split('.')[-2]\n number = name.split('_')[-1]\n level.append(float(number))\n return np.array(level)",
"def medoidMosaic(self,collection):\n \n\t\t# calculate the median of temp band\n\t\tthermal = ee.ImageCollection(collection.select(['thermal'])).median()\n \n\t\tcollection = collection.select(self.env.divideBands)\n\n\t\tbandNames = self.env.divideBands;\n\t\tbandNumbers = ee.List.sequence(1,bandNames.length());\n \n\t\t# calculate medion\n\t\tmedian = ee.ImageCollection(collection).median()\n \n\t\tdef subtractmedian(img):\n\t\t\tdiff = ee.Image(img).subtract(median).pow(ee.Image.constant(2));\n\t\t\treturn diff.reduce('sum').addBands(img);\n \n\t\tmedoid = collection.map(subtractmedian)\n \n\t\tmedoid = ee.ImageCollection(medoid).reduce(ee.Reducer.min(bandNames.length().add(1))).select(bandNumbers,bandNames);\n \n\t\treturn medoid.addBands(thermal);",
"def getNativeMagnification(self):\n return self._nativeMagnification.copy()",
"def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages",
"def getMagnitudes(self):\n return self._bmag, self._vmag, self._jmag, self._hmag, self._kmag",
"def load_slide(self, fileName):\n image = open_slide(fileName)\n dims = image.level_dimensions\n ratio = np.array(image.level_dimensions[0])/np.array(image.level_dimensions[-1])\n\n return image, dims, ratio",
"def setMagnificationsInTiltSeries(self, TiltSeries_):\n kk = 0\n for proj in TiltSeries_._ProjectionList._list:\n proj.setAlignmentMagnification(self._alignmentMagnifications[kk])\n kk = kk + 1",
"def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)",
"def add_mosaics(self):\n for tree in self.mosaictrees:\n self.add_mosaic(tree, -1)",
"def getMagnificationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment in seperate array - easier for optimization\n self._alignmentMagnifications = len(TiltSeries_._ProjectionList._list) * [1.]\n for (kk, proj) in enumerate(TiltSeries_._ProjectionList._list):\n self._alignmentMagnifications[kk] = proj.getAlignmentMagnification()\n return self._alignmentMagnifications",
"def _get_med(self):\n return self.__med",
"def tile_gen_at_mag(wsi, mag, tile_size):\n #Get size of WSI at Level 0 (Max Magnification)\n x0, y0 = wsi.level_dimensions[0]\n #Get size of WSI at the mag we want\n x_mag, y_mag = get_size_for_mag(wsi, mag)\n x_tiles = int(np.floor(x_mag/tile_size))\n y_tiles = int(np.floor(y_mag/tile_size))\n #Scale tile size accordingly\n scale = highest_mag(wsi)/mag\n yield (x_tiles, y_tiles)\n tiles = []\n for y in range(y_tiles):\n for x in range(x_tiles):\n x_coord = round(x*scale*tile_size)\n y_coord = round(y*scale*tile_size)\n scaled_tile_size = round(scale*tile_size)\n tile = wsi.read_region((x_coord, y_coord), 0, (scaled_tile_size, scaled_tile_size))\n yield tile.resize((tile_size, tile_size), resample = Image.BICUBIC)",
"def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None):\r\n\r\n if not hasattr(self, 'determinantMap'):\r\n _ = self._getDeterminantMap()\r\n\r\n if hasattr(self, 'finalPathesMarked'):\r\n finalPatches = self.finalPatchesMarked\r\n elif hasattr(self, 'finalPatches'):\r\n finalPatches = self.finalPatches\r\n else:\r\n self.processTrial()\r\n finalPatches = self.finalPatches\r\n\r\n magMap = 1 / self.determinantMap\r\n\r\n if isFilter:\r\n magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])\r\n\r\n # get mean power amplitude for all visual areas normalized by V1\r\n magDict = {}\r\n for key, patch in finalPatches.items():\r\n array = patch.array.astype(np.float)\r\n\r\n if erodeIter:\r\n array = ni.binary_erosion(array, iterations=erodeIter)\r\n\r\n area = np.sum(array)\r\n\r\n totalMag = np.sum(array * magMap)\r\n\r\n magDict.update({key: (pixelSize ** 2) * totalMag / area})\r\n\r\n return magDict",
"def get_magmom_string():\n\n magmoms = []\n poscar_lines = open('POSCAR').readlines()\n elements = poscar_lines[5].split()\n amounts = poscar_lines[6].split()\n for i in range(len(elements)):\n if Element(elements[i]).is_transition_metal:\n magmoms.append('{}*6.0'.format(amounts[i]))\n else:\n magmoms.append('{}*0.5'.format(amounts[i]))\n return ' '.join(magmoms)",
"def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)"
] | [
"0.7798809",
"0.6923862",
"0.66853",
"0.65677786",
"0.62147903",
"0.6124908",
"0.61031574",
"0.6090716",
"0.6088541",
"0.58556306",
"0.5820038",
"0.56304854",
"0.5626129",
"0.56065404",
"0.5559378",
"0.54275894",
"0.54266506",
"0.5425498",
"0.53985816",
"0.53818405",
"0.5253527",
"0.5240948",
"0.51987",
"0.5192708",
"0.51578003",
"0.5132643",
"0.5086026",
"0.50854266",
"0.50435466",
"0.49884495"
] | 0.78430814 | 0 |
Returns the dimensions of a level | def get_level_size(slide, level):
return slide.level_dimensions[level] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dimensions():",
"def getDimensions():",
"def _get_ndim(self):\n return len(self.level_shapes[0])",
"def depth(self):\n return _libsbml.Dimensions_depth(self)",
"def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")",
"def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50",
"def get_dim(self, name):\n return len(self.root_group.dimensions[name])",
"def get_dimension_length(self):\n pass",
"def dimension(self):",
"def dimensions(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return var.dimensions",
"def dimension(self):\n\t\treturn self.d",
"def dims(self):\n return self[0].dims",
"def getDepth(self):\n return _libsbml.Dimensions_getDepth(self)",
"def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? here / in varset?",
"def levshape(self) -> Shape:\n return tuple(len(x) for x in self.levels)",
"def size(self, level=None):\n level = level or self.local_variables\n names = {}\n while level:\n for name in level.bindings:\n names[name] = 1\n level = level.parent\n return len(names)",
"def getDimensions(unique_name=None):",
"def dimensions(self):\n return self.index.names",
"def N(self):\n return self._dimensions",
"def dimension(self) -> float:\n return self._dimensions",
"def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)",
"def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)",
"def n_levels(self):\n return len(self.scales)",
"def getLevels():",
"def get_dim():\n return (Settings.width, Settings.height)",
"def dims(self):\n return tuple(d for d in (v.states for v in self.__vars)) if len(self.__vars) else (1,)",
"def get_dimension(self):\n return",
"def dim(self):\n\t\treturn self.D",
"def getDimensions(self):\n\ttop = self.getTop()\n\tleft = self.getLeft()\n\twidth = self.getWidth()\n\theight = self.getHeight()\n\treturn top, left, width, height",
"def dimensions(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"dimensions\")"
] | [
"0.7337827",
"0.7250944",
"0.7064121",
"0.7035364",
"0.6847221",
"0.67899704",
"0.67407316",
"0.6720339",
"0.66824",
"0.66605484",
"0.66371953",
"0.6546749",
"0.6472022",
"0.6469843",
"0.64618737",
"0.6444262",
"0.6423922",
"0.6412058",
"0.6396843",
"0.6379703",
"0.6365779",
"0.63647896",
"0.63589346",
"0.6353814",
"0.63318825",
"0.63226545",
"0.62823856",
"0.6281621",
"0.62745583",
"0.6257524"
] | 0.8075793 | 0 |
Returns the magnification at a particular level | def get_level_mag(slide, level):
return level_mags(slide)[level] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification",
"def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }",
"def get_level_for_mag(slide, mag):\n level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))\n if mag in level_mags_rounded:\n return level_mags_rounded.index(mag)\n else: \n return None",
"def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]",
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]",
"def get_mag(self):\n raise NotImplementedError",
"def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio",
"def getNativeMagnification(self):\n return self._nativeMagnification.copy()",
"def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])",
"def get_level_size(slide, level):\n return slide.level_dimensions[level]",
"def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample",
"def detail(self):\n return self.uniform(\"detail\",\n self.img_scale * .05,\n self.img_scale * .2)",
"def mag(self) -> complex:\n return self.major_extent",
"def resolution(self, level):\n return 2 ** (level - 1)",
"def magnitude(x):\n return x.magnitude if hasattr(x, 'magnitude') else x",
"def mag2(self) -> numbers.Number:\n mv_val = self.layout.gmt_func(self.layout.adjoint_func(self.value), self.value)\n return mv_val[0]",
"def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)",
"def difficulty(mag):\n mag = float(mag)\n if mag <= -4:\n return \"Visible in daytime.\"\n elif mag <= 6:\n return \"Visible at night.\"\n else:\n flux = mag_def(\"%s x\" % mag)\n needed_flux = mag_def(\"6 x\")\n eye_area = math.pi * (0.005**2)\n needed_power = needed_flux * eye_area\n diameter = 2 * math.sqrt(needed_power / (flux*math.pi))\n return \"%s m telescope needed.\" % diameter",
"def _get_med(self):\n return self.__med",
"def enemyrawdmg(self):\n\n enemystr = globalvalues.ai.getstatus()[3]\n # rngfactor will ensure that regular mobs won't absolutely crush you\n rngfactor = float(float(random.randint(45, 65)) / 100)\n level = (\n globalvalues.p1.getlevel()\n - globalvalues.ai.getstatus()[0]\n )\n lvlfactor = float(1 - level * 0.05)\n\n return int((enemystr) * 102 * 0.12 * rngfactor * lvlfactor)",
"async def get_xp(level, command):\n if command == \"profile\":\n return 250 * level\n return int((2 * 350) * (2 ** (level - 2))) # 350 is base value (level 1)",
"def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50",
"def normalize_zoomlvl(lvl):\n if lvl < gMinZoomLevel:\n return gMinZoomLevel\n elif lvl > gMaxZoomLevel:\n return gMaxZoomLevel\n else:\n return lvl - gMinZoomLevel",
"def magnitude(pos):\n x, y = pos\n return x * x + y * y",
"def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5",
"def mab0(self):\n return GALEX_INFO[self.bandname][\"ABmag0\"]",
"def getMagnification(self, pixelSize=0.0129, isFilter=False, erodeIter=None):\r\n\r\n if not hasattr(self, 'determinantMap'):\r\n _ = self._getDeterminantMap()\r\n\r\n if hasattr(self, 'finalPathesMarked'):\r\n finalPatches = self.finalPatchesMarked\r\n elif hasattr(self, 'finalPatches'):\r\n finalPatches = self.finalPatches\r\n else:\r\n self.processTrial()\r\n finalPatches = self.finalPatches\r\n\r\n magMap = 1 / self.determinantMap\r\n\r\n if isFilter:\r\n magMap = ni.filters.gaussian_filter(magMap, self.params['signMapFilterSigma'])\r\n\r\n # get mean power amplitude for all visual areas normalized by V1\r\n magDict = {}\r\n for key, patch in finalPatches.items():\r\n array = patch.array.astype(np.float)\r\n\r\n if erodeIter:\r\n array = ni.binary_erosion(array, iterations=erodeIter)\r\n\r\n area = np.sum(array)\r\n\r\n totalMag = np.sum(array * magMap)\r\n\r\n magDict.update({key: (pixelSize ** 2) * totalMag / area})\r\n\r\n return magDict",
"def get_size_for_mag(slide, mag):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = max_mag/mag\n return [np.int(np.round(dim/downsample)) for dim in max_size]",
"def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"",
"def mag(self) -> float:\n return sqrt(self.sqr_mag())"
] | [
"0.67559737",
"0.6685309",
"0.66784066",
"0.65893006",
"0.6497956",
"0.62938315",
"0.61198086",
"0.6092812",
"0.6024545",
"0.5632027",
"0.55412483",
"0.5471536",
"0.54661393",
"0.5413815",
"0.5413614",
"0.53920174",
"0.53455603",
"0.5330778",
"0.5330044",
"0.5313709",
"0.5275029",
"0.52631474",
"0.5228527",
"0.5221894",
"0.51973796",
"0.5189064",
"0.5183032",
"0.51619035",
"0.516099",
"0.5150507"
] | 0.7867714 | 0 |
Get the level corresponding to a certain magnification, if available | def get_level_for_mag(slide, mag):
level_mags_rounded = list(np.round(level_mags(slide), decimals = 2))
if mag in level_mags_rounded:
return level_mags_rounded.index(mag)
else:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_level_mag(slide, level):\n return level_mags(slide)[level]",
"def getLevel(unique_name):",
"def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]",
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]",
"def get_luminosity(name):\n all_data = mc.get('sensor_values')\n name = _lookup(name)\n try:\n return all_data[name][3]\n except KeyError:\n raise KeyError(\"No sensor with that name\")",
"def getNativeMagnification(self):\n pixelInfo = self._tiffDirectories[-1].pixelInfo\n mm_x = pixelInfo.get('mm_x')\n mm_y = pixelInfo.get('mm_y')\n # Estimate the magnification if we don't have a direct value\n mag = pixelInfo.get('magnification') or 0.01 / mm_x if mm_x else None\n return {\n 'magnification': mag,\n 'mm_x': mm_x,\n 'mm_y': mm_y,\n }",
"def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages",
"def get_level(rol):\n\treturn rol.level",
"def getLevel(self, level):\n mingroup = None\n groups = self.console.storage.getGroups()\n\n for x in groups:\n\n if x.level < level:\n continue\n\n if mingroup is None:\n mingroup = x\n continue\n\n if x.level < mingroup.level:\n mingroup = x\n\n return mingroup.name",
"def highest_mag(slide):\n return int(slide.properties['aperio.AppMag'])",
"def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum",
"def resolution(self, level):\n return 2 ** (level - 1)",
"def zoomlevels(self):\n return self._bboxes[0][1] #TODO: merge all coverages",
"def _level_info(entity):\n if entity.is_max_level():\n return 'Maxed'\n if entity.max_level is not None:\n return '{entity.level}/{entity.max_level}'.format(entity=entity)\n return entity.level",
"def getLevel(self):\n return self.level",
"def extract_level_from_name(self):\n images = glob.glob(os.path.join(self.frame_dir, '*'))\n level = []\n for i, im in enumerate(images):\n base, tail = os.path.split(im)\n name = tail.split('.')[-2]\n number = name.split('_')[-1]\n level.append(float(number))\n return np.array(level)",
"def get_prior_mag(mag_dict):\n print(\"GETTING MAGNITUDE\")\n print(mag_dict)\n print(type(mag_dict))\n mag = 17\n if not isinstance(mag_dict, dict):\n print(\"Not a dictionary so using mag=17\")\n return mag\n\n for k, v in mag_dict.items():\n mag = v['mag']\n\n try:\n mag = float(mag)\n except Exception as e:\n print(str(e), \"Error getting magnitude\")\n mag = 17\n print(mag)\n return mag",
"def getLevels():",
"def get_level_size(slide, level):\n return slide.level_dimensions[level]",
"def get_level(self, level):\n return",
"def read_slide_at_mag(slide, mag):\n exact_level = get_level_for_mag(slide, mag)\n if exact_level is not None:\n return slide.read_region((0,0), exact_level, get_level_size(slide, exact_level))\n else:\n max_size = slide.dimensions\n region_size = tuple(get_size_for_mag(slide, mag))\n downsample = np.average([max_dim/region_dim for max_dim, region_dim in zip(max_size, region_size)])\n best_level = slide.get_best_level_for_downsample(downsample)\n best_level_size = get_level_size(slide, best_level)\n best_level_img = slide.read_region((0,0), best_level, best_level_size)\n return best_level_img.resize(region_size, resample = Image.BICUBIC)",
"def get_mag(self):\n raise NotImplementedError",
"def _calculate_magnification(self, times):\n if self._model.n_lenses == 2:\n factor = 10.\n params = self._model.parameters\n t_1 = params.t_0 - factor * params.t_E\n t_2 = params.t_0 + factor * params.t_E\n self._model.set_magnification_methods([t_1, 'VBBL', t_2])\n self._model.set_default_magnification_method(\n 'point_source_point_lens')\n\n magnification = self._model.magnification(times)\n return magnification",
"def get_level(level_name):\n return LEVELS[level_name.upper()]",
"def test_changeIlluminationLevel(self):\n fade_to_black = \"Your environs fade to black due to Ineffable Spooky Magic.\"\n no_change = \"You do it. Swell.\"\n dark_to_light = \"Your environs are suddenly alight.\"\n brighten = \"Your environs seem slightly brighter.\"\n endarken = \"Your environs seem slightly dimmer.\"\n Manipulator.createFor(self.playerWrapper.actor)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n\n ll = self.store.findUnique(\n objects.LocationLighting,\n objects.LocationLighting.thing == self.location)\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 0\",\n [no_change])\n self.assertEquals(ll.candelas, 0)\n\n self._test(\n \"illuminate 100\",\n [dark_to_light],\n [dark_to_light])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 110\",\n [brighten],\n [brighten])\n self.assertEquals(ll.candelas, 110)\n\n self._test(\n \"illuminate 100\",\n [endarken],\n [endarken])\n self.assertEquals(ll.candelas, 100)\n\n self._test(\n \"illuminate 0\",\n [fade_to_black],\n [fade_to_black])\n self.assertEquals(ll.candelas, 0)",
"def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6",
"def difficulty(mag):\n mag = float(mag)\n if mag <= -4:\n return \"Visible in daytime.\"\n elif mag <= 6:\n return \"Visible at night.\"\n else:\n flux = mag_def(\"%s x\" % mag)\n needed_flux = mag_def(\"6 x\")\n eye_area = math.pi * (0.005**2)\n needed_power = needed_flux * eye_area\n diameter = 2 * math.sqrt(needed_power / (flux*math.pi))\n return \"%s m telescope needed.\" % diameter",
"def getNativeMagnification(self):\n return self._nativeMagnification.copy()",
"def get_levels(self):\n return self.levels[self.game]",
"def track_moisture_level():\n try:\n normal_level_init = 470\n low_level_init = 560\n\n global LIMIT_FLAG\n sensor_read = sensorData.read_moisture()\n generate_json.define_structure(\"moisture\", sensor_read)\n\n if sensor_read > low_level_init:\n if LIMIT_FLAG != 3:\n # When it is dry (Moisture Level Low)\n LIMIT_FLAG = 3\n blynk.notify('Moisture Level Low! Irrigation Needed')\n blynk.email('[email protected]', 'Alert: Moisture Level Low',\n 'Moisture Level Low! Irrigation Needed')\n logging_write()\n elif normal_level_init <= sensor_read <= low_level_init:\n if LIMIT_FLAG != 2:\n LIMIT_FLAG = 2\n logging_write()\n else:\n if LIMIT_FLAG != 1:\n LIMIT_FLAG = 1\n logging_write()\n return sensor_read\n\n except Exception as e:\n logging_write(e)"
] | [
"0.76260245",
"0.6262649",
"0.62293607",
"0.61587805",
"0.60415137",
"0.6033318",
"0.58130866",
"0.5732402",
"0.57084596",
"0.5647901",
"0.5613819",
"0.5604842",
"0.5599045",
"0.55820286",
"0.5574072",
"0.5559694",
"0.55576396",
"0.555535",
"0.55302167",
"0.5511368",
"0.5494198",
"0.54852396",
"0.5452814",
"0.54244727",
"0.54218197",
"0.5421613",
"0.5416685",
"0.54048425",
"0.53021735",
"0.52995604"
] | 0.7825988 | 0 |
Get the image size the highest magnification image would have to be resized to get an equivalent magnification | def get_size_for_mag(slide, mag):
max_size = slide.dimensions
max_mag = highest_mag(slide)
downsample = max_mag/mag
return [np.int(np.round(dim/downsample)) for dim in max_size] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getZoomFactor(imageSize, maxW, maxH):\n\timageW, imageH = imageSize\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\treturn max(zoomW, zoomH)",
"def get_image_size(self):",
"def get_thumbnail_magnification(slide):\n ratio = np.asarray(slide.dimensions) / np.asarray(slide.associated_images[\"thumbnail\"].size)\n # np.sqrt(np.prod(ratio))\n return ratio",
"def _image_resolution(image_filename):\n img = mpimg.imread(image_filename)\n return img.shape",
"def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample",
"def image_size(size):\n l_max = max(size)\n if l_max > 300:\n num = l_max/300\n else:\n num = 1\n w = round(size[0] / num)\n h = round(size[1] / num)\n new_size = [w, h]\n return new_size",
"def img_scale(self):\n return min(400, abs(self.size))",
"def size(img):\n\treturn img.size",
"def calc_image_size(spr):\n return int(max(spr.label_safe_width(), 1)), \\\n int(max(spr.label_safe_height(), 1))",
"def get_size(img):\n ih, iw = img.shape[:2]\n return iw * ih",
"def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width",
"def get_new_img_size(w, h, img_min_side = 600):\n if w <= h:\n f = float(img_min_side) / w\n resized_h = int(f * h)\n resized_w = img_min_side\n else:\n f = float(img_min_side) / h\n resized_w = int(f * w)\n resized_h = img_min_side\n \n return resized_w, resized_h",
"def _get_target_scale(self, im_size_min, im_size_max, target_size, max_size):\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale",
"def _get_extended_image_size(height, width, patch_size, stride):\n\n ext_height, ext_width = 0, 0\n\n def sliding_distance(n_windows, window_size, stride):\n return window_size * n_windows - (window_size - stride) * (n_windows - 1)\n\n if height < patch_size:\n ext_height = patch_size\n else:\n for n in range(height):\n distance = sliding_distance(n, patch_size, stride)\n if distance > height:\n ext_height = distance\n break\n\n if width < patch_size:\n ext_width = patch_size\n else:\n for n in range(width):\n distance = sliding_distance(n, patch_size, stride)\n if distance > width:\n ext_width = distance\n break\n\n return ext_height, ext_width",
"def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]",
"def getScaledDimensions(size, max_size, returnFactor=False):\n\n width, height = size\n max_width, max_height = max_size\n if (max_width, max_height) == (0, 0) or (width, height) == (0, 0): return (0, 0)\n wfactor, hfactor = 1.0, 1.0\n\n if width > max_width: wfactor = float(max_width) / width\n if height > max_height: hfactor = float(max_height) / height\n\n factor = min(wfactor, hfactor)\n\n size = (width * factor, height * factor)\n\n if not returnFactor:\n return size\n else:\n return size, factor",
"def get_height():\n return resize.transforms[1].size",
"def GetBestSize(self):\n bmp = self._bitmap\n return wx.Size(bmp.GetWidth(), bmp.GetHeight())",
"def get_resize_to(image, size_x, size_y):\n scale_x = image.shape[0] // size_x\n scale_y = image.shape[1] // size_y\n if scale_x == 0 or scale_y == 0:\n return 3, None\n if image.shape[0] % scale_x != 0 or image.shape[1] % scale_y != 0:\n return 1, None\n if image.shape[0] < scale_x or image.shape[1] < scale_y:\n return 2, None\n\n arrays = []\n for i in range(scale_x):\n for j in range(scale_y):\n arrays.append(image[i::scale_x, j::scale_y])\n\n result = mode(np.stack(arrays), axis=0).mode[0]\n if result.max() > 10:\n print(1)\n\n return 0, result",
"def pixel_size_ratio(self):\n return 2**(self.levels[-1] - self.levels[0])",
"def get_image_size(self, **kwargs):\n fov_height = np.abs(self.fov_pitch[1] - self.fov_pitch[0])\n fov_width = np.abs(self.fov_yaw[1] - self.fov_yaw[0])\n height = np.ceil(fov_height * self.res_pitch).astype(int)\n width = np.ceil(fov_width * self.res_yaw).astype(int)\n\n return height, width",
"def _SizeCalculator(partition_size):\n # Minus footer size to return max image size.\n return partition_size - int(math.pow(partition_size, 0.95))",
"def get_image_size(frame) -> tuple:\n return tuple(frame.shape[1::-1])",
"def calc_thumbnail_size(img):\n width, length = img.size\n ratio = width / length\n\n # for some reason, if it's exactly 224, then thumnailed image is 223\n dim = 224 + 1 # output dim\n if ratio > 1:\n size = (dim * ratio, dim)\n else:\n size = (dim, dim / ratio)\n# print(size)\n return size",
"def _select_largest_photo(self, sizes):\n\n max_size = 0\n photo = ''\n for size in sizes:\n w = size['width']\n h = size['height']\n if w * h >= max_size:\n max_size = w * h\n photo = size['url']\n return photo",
"def _get_image_size(self):\n return (3, 224, 224)",
"def resizeImage(image, maxW, maxH):\n\timageW, imageH = image.size\n\tif imageW == maxW and imageH == maxH:\n\t\treturn image\n\t# find which axis requires the biggest zoom (smallest relative max dimension)\n\tzoomW = float(imageW) / float(maxW)\n\tzoomH = float(imageH) / float(maxH)\n\tzoom = max(zoomW, zoomH)\n\tif zoomW >= zoomH:\t# size is defined by width\n\t\tmaxH = int(imageH//zoom)\t# calculate the new height\n\telse:\n\t\tmaxW = int(imageW//zoom)\n\treturn image.resize((maxW, maxH))",
"def GetSizeGreatestPrimeFactor(self) -> \"unsigned long long\":\n return _itkHalfHermitianToRealInverseFFTImageFilterPython.itkHalfHermitianToRealInverseFFTImageFilterICF2IF2_GetSizeGreatestPrimeFactor(self)",
"def recommended_size(img_shape):\r\n new_width = 512\r\n new_height = img_shape[0] / img_shape[1] * 512\r\n new_height = round(new_height / 32) * 32\r\n return new_width, new_height",
"def format_img_size(self, img, C):\n img_min_side = float(C.im_size)\n (height,width,_) = img.shape\n\n if width <= height:\n ratio = img_min_side/width\n new_height = int(ratio * height)\n new_width = int(img_min_side)\n else:\n ratio = img_min_side/height\n new_width = int(ratio * width)\n new_height = int(img_min_side)\n img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n return img, ratio"
] | [
"0.70223284",
"0.69094974",
"0.6904307",
"0.6871893",
"0.6834652",
"0.67987007",
"0.6753258",
"0.6662533",
"0.6641104",
"0.6629991",
"0.6623062",
"0.66070765",
"0.65710425",
"0.6462559",
"0.6438334",
"0.64352167",
"0.64264476",
"0.6413869",
"0.64073336",
"0.6405353",
"0.63959295",
"0.6393958",
"0.6378742",
"0.6371808",
"0.6340691",
"0.63312304",
"0.6328157",
"0.63233536",
"0.63168645",
"0.6313881"
] | 0.721434 | 0 |
Adds 5 latest blog posts as `latest_articles`, 5 latest comments as `latest_comments`, all tags (annotated with a `num_articles` count) as `tags`, and all contributors (annotated with `num_articles`) as `contributors` to the context, regardless of `request`. | def latest_content(request):
latest_articles = Article.published_articles()[:5]
latest_comments = Comment.objects.all().order_by('-pub_date')[:5]
tags = Tag.objects.annotate(num_articles=Count('article')).order_by(
'-num_articles')
contributors = Contributor.objects.annotate(
num_articles=Count('article')).order_by('-num_articles')
return {'latest_articles': latest_articles,
'latest_comments': latest_comments,
'tags': tags,
'contributors': contributors,
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def latest_blog_posts(self, request, *args, **kwargs):\n context = self.get_context(request, *args, **kwargs)\n context[\"latest_posts\"] = MyblogDetailPage.objects.live().public()[:1] \n return render(request, \"myblog/latest_posts.html\", context)",
"def last_five(request):\n flag_five = True\n topics = (\n request.db[\"topic\"].find().sort([(\"$natural\", -1), (\"topic_date\", -1)]).limit(5)\n )\n\n return render_to_response(\n \"templates/home.html\",\n {\"topics\": topics, \"flag_five\": flag_five, \"count\": count(request)},\n request=request,\n )",
"def get_context(self, request):\n articles = self.articles\n\n # Filtering by tag\n tag = request.GET.get('tag')\n if tag:\n articles = articles.filter(tags__name=tag)\n\n # Pagination, using the blog settings\n page = request.GET.get('page')\n page_number = BlogSettings.for_site(request.site).page_number\n paginator = Paginator(articles, page_number)\n try:\n articles = paginator.page(page)\n except PageNotAnInteger:\n articles = paginator.page(1)\n except EmptyPage:\n articles = paginator.page(paginator.num_pages)\n\n # Updating the template context\n context = super(Blog, self).get_context(request)\n context['articles'] = articles\n context['current_tag'] = tag\n return context",
"def get_context_data(self, **kwargs):\n data = super().get_context_data(**kwargs)\n posts = BlogPost.objects.all()\\\n .select_related(\"author\")\\\n .prefetch_related(\"tags\")\\\n .filter(is_published=True)\\\n .order_by('-published_at', 'id')\n\n if 'lastPublished' in self.request.GET:\n last_published = datetime.strptime(self.request.GET['lastPublished'], self.DATE_FORMAT)\n posts = posts.filter(published_at__lt=last_published)\n\n if 'tag_slug' in kwargs:\n posts = posts.filter(tags__slug__in=(kwargs['tag_slug'], ))\n\n if 'author_id' in kwargs:\n posts = posts.filter(author=kwargs['author_id'])\n\n data['has_more'] = posts.count() > 10\n\n posts = list(posts[:10])\n data['last_published'] = min([i.published_at for i in posts]) if posts else None\n data['last_published'] = data['last_published'].strftime(self.DATE_FORMAT) if data['last_published'] else None\n data['posts'] = posts\n\n return data",
"def get_recent_posts(self, request, count):\n if request.has_permission('edit'):\n return DBSession.query(Post).filter_by(blog=self).order_by('created desc').slice(0, count).all()\n else:\n return DBSession.query(Post).filter_by(blog=self, published=True).order_by('created desc').slice(0, count).all()",
"def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n all_posts = BlogDetailPage.objects.all().order_by('-first_published_at')\n paginator = Paginator(all_posts, 1)\n page = request.GET.get(\"page\")\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context[\"posts\"] = posts\n context[\"authors\"] = BlogAuthor.objects.all()\n context[\"reverse_url\"] = self.reverse_subpage('latest_posts')\n context[\"categories\"] = BlogCategory.objects.all()\n return context",
"def blog(request):\n\tlatest_posts = Post.objects.all().order_by('-created_at')\n\tpopular_posts = Post.objects.all().order_by('-views')[:5]\n\tfor post in latest_posts:\n\t\tpost.url = encode_url(post.title)\n\tfor popular_post in popular_posts:\n\t\tpopular_post.url = encode_url(popular_post.title)\n\treturn render(request, 'blog/blog.html', {'latest_posts': latest_posts, \n\t\t\t\t\t\t\t\t\t\t\t 'popular_posts': popular_posts})",
"def latest(request):\n post_list = Post.objects.exclude(hidden = True).order_by('-created')\n paginator = Paginator(post_list, 10)\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n try:\n posts = paginator.page(page)\n except EmptyPage, InvalidPage:\n posts = paginator.page(paginator.num_pages)\n return render(request, 'blog/latest.html', {'posts': posts},\n context_instance = RequestContext(request))",
"def index(request):\n\n num_posts = BlogPost.objects.all().count()\n num_bloggers = Blogger.objects.all().count()\n latest_blog = BlogPost.objects.latest('post_date')\n\n context = {\n 'num_posts': num_posts,\n 'num_bloggers': num_bloggers,\n 'latest_blog': latest_blog,\n }\n\n return render(request, 'index.html', context=context)",
"def index(request, archive=False):\n context = {'archive':archive}\n posts = Post.objects.all()\n if not archive:\n posts = posts[:10]\n context['posts'] = posts\n if request.user.is_authenticated():\n #These are the new news items the logged in user has\n context['new_posts'] = NewBlog.objects.filter(user=request.user)\n return render(request, 'blog/index.html', context)",
"def topic_recent(request):\n posts = Post.objects.all().order_by(\"-created\")[:3]\n posts = mk_paginator(request, posts, DJANGO_SIMPLE_FORUM_REPLIES_PER_PAGE)\n # topic = Topic.objects.get(pk=topic_id)\n return render_to_response(\"forum/topic_recent.html\", add_csrf(request, posts=posts), context_instance=RequestContext(request))",
"def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n # Get all posts\n all_posts = BlogPage.objects.live().public() \\\n .order_by(\n '-first_published_at'\n )\n date_sorted_posts = sorted(\n all_posts, key=lambda p: p.specific.date, reverse=True\n )\n # Paginate all posts by 5 per page\n paginator = Paginator(date_sorted_posts, 5)\n # Try to get the ?page=x value\n page = request.GET.get(\"page\")\n try:\n # If the page exists and the ?page=x is an int\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If the ?page=x is not an int; show the first page\n posts = paginator.page(1)\n except EmptyPage:\n # If the ?page=x is out of range (too high most likely)\n # Then return the last page\n posts = paginator.page(paginator.num_pages)\n\n # \"posts\" will have child pages; you'll need to use .specific in the template\n # in order to access child properties, such as youtube_video_id and subtitle\n context[\"posts\"] = posts\n return context",
"def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five",
"def get_recently_articles(cls, num):\n return cls.objects.values('title', 'view_times', 'update_time', 'author')\\\n .filter(status=0).order_by('-update_time')[:num]",
"def api_get_threads(request, count):\n\n if PARAMETER_TAG in request.GET:\n tag_name = request.GET[PARAMETER_TAG]\n if tag_name is not None:\n tag = get_object_or_404(Tag, name=tag_name)\n threads = tag.threads.filter(archived=False)\n else: \n threads = Thread.objects.filter(archived=False)\n\n if PARAMETER_OFFSET in request.GET:\n offset = request.GET[PARAMETER_OFFSET]\n offset = int(offset) if offset is not None else 0\n else:\n offset = 0\n\n threads = threads.order_by('-bump_time')\n threads = threads[offset:offset + int(count)]\n\n opening_posts = []\n for thread in threads:\n opening_post = thread.get_opening_post()\n\n # TODO Add tags, replies and images count\n opening_posts.append(_get_post_data(opening_post.id,\n include_last_update=True))\n\n return HttpResponse(content=json.dumps(opening_posts))",
"def recent(request):\r\n rdict = request.matchdict\r\n params = request.params\r\n\r\n # Make sure we generate a url to feed our rss link.\r\n current_route = request.current_route_url()\r\n\r\n # check for auth related stuff\r\n # are we looking for a specific user\r\n username = rdict.get('username', None)\r\n if username:\r\n username = username.lower()\r\n\r\n # do we have any tags to filter upon\r\n tags = rdict.get('tags', None)\r\n\r\n if isinstance(tags, str):\r\n tags = [tags]\r\n\r\n ret = {\r\n 'username': username,\r\n 'tags': tags,\r\n 'rss_url': current_route.replace('recent', 'rss')\r\n }\r\n\r\n # if we've got url parameters for the page/count then use those to help\r\n # feed the init of the ajax script\r\n ret['count'] = params.get('count') if 'count' in params else RESULTS_MAX\r\n ret['page'] = params.get('page') if 'page' in params else 0\r\n\r\n # Do we have any sorting criteria?\r\n ret['sort'] = params.get('sort') if 'sort' in params else None\r\n\r\n return ret",
"def get_recent_posts(self, numposts=10, blogid=1):\n return self.execute('metaWeblog.getRecentPosts', blogid, self.username, self.password, numposts)",
"def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return res",
"def get_latest_posts(parser, token):\n\ttry:\n\t\ttag_name, arg = token.contents.split(None, 1)\n\texcept ValueError:\n\t\traise template.TemplateSyntaxError, \"%s tag requires arguments\" % token.contents.split()[0]\n\t\n\tm = re.search(r'(.*?) as (\\w+)', arg)\n\t\n\tif not m:\n\t\traise template.TemplateSyntaxError, \"%s tag had invalid arguments\" % tag_name\n\t\n\tformat_string, var_name = m.groups()\n\t\n\treturn LatestPosts(format_string[0], var_name)",
"def get_context(self, request, *args, **kwargs):\n\n context = super().get_context(request, *args, **kwargs)\n context['posts'] = BlogDetailPage.objects.live().public()\n return context",
"def add_new_posts(last_updated=None):\n for blog in Blog.objects.all():\n try:\n document = feedparser.parse(blog.feed_url)\n except:\n print \"error parsing\"\n continue\n\n if last_updated is None:\n print(\"- Adding %i articles from %s\" % (len(document['entries']), blog.title))\n\n for entry in document['entries']:\n # now we create a new post\n post = Post()\n post.blog = blog\n post.title = entry['title']\n\n if 'summary' in entry:\n post.content = entry['summary']\n if 'content' in entry:\n post.content = entry['content']\n\n post.link = entry['link']\n post.save()\n else:\n # TODO: only parse from a date\n pass",
"def GET_front_recent_posts(self, *a, **kw):\r\n # Server side cache is also invalidated when new article is posted\r\n return self.render_cached('recent-promoted', RecentPromotedArticles, g.side_posts_max_age)",
"def get_featured_articles(request):\n try:\n count = 1\n if 'count' in request.POST and int(request.POST['count']):\n count = int(request.POST['count'])\n\n newest_list = []\n for article in Article.objects.order_by('-modified')[:count]:\n newest_list.append(article.dump_to_dict())\n\n popular_list = []\n for article in Article.objects.order_by('-views')[:count]:\n popular_list.append(article.dump_to_dict())\n\n return format_ajax_response(True, \"Featured articles retrieved successfully.\", {'newest': newest_list,'popular': popular_list})\n except Exception as ex:\n logger.error(\"Failed to get_featured_articles: %s\" % ex)\n return format_ajax_response(False, \"There was an error retrieving the featured articles.\")",
"def sb_related_posts(post, num=5):\n # defer some of the heavier post fields once the defer+annotate bug has been fixed\n posts = Post.objects.published().filter(tag__in=post.tags).exclude(id=post.id).annotate(weight=models.Count('tag')).order_by('-weight', '-date')[:num]\n return dict(blog=Blog.get_active(), posts=posts)",
"def BestPosts(request):\r\n \r\n context = {\r\n # Sort by count\r\n 'posts': Post.objects.annotate(count=Count('likes')).order_by('-count')\r\n }\r\n return render(request, 'blog/best_posts.html', context=context)",
"def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles",
"def get_context(self, request, *args, **kwargs):\n context = super().get_context(request, *args, **kwargs)\n all_posts = PostPage.objects.live().public().order_by('-first_published_at') \n\n page = request.GET.get(\"page\")\n category = request.GET.get(\"category\")\n if category and BlogCategory.objects.filter(slug=category).exists():\n all_posts = all_posts.filter(categories__slug = category)\n\n paginator = Paginator(all_posts, 9)\n\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n context[\"posts\"] = posts\n context[\"categories\"] = BlogCategory.objects.all() \n\n return context",
"def dashboard_content_article_tag_cloud():\n tag_stats = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n for tag in result.get('tags', list()):\n tag_stats[tag] = tag_stats.get(tag, 0) + 1\n tags_sorted = sorted(tag_stats.items(), key=operator.itemgetter(1),\n reverse=True)[:50]\n data = list()\n for item in tags_sorted:\n data.append({'name': item[0], 'weight': item[1]})\n return jsonify(data)",
"def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")",
"def pull_articles(self, *args, **kwargs):\n tasks.pull_articles()\n return Response({})"
] | [
"0.6630519",
"0.6064325",
"0.5982739",
"0.5916848",
"0.58361304",
"0.5788325",
"0.57815117",
"0.5755909",
"0.57152796",
"0.56332666",
"0.56171244",
"0.5601291",
"0.5582968",
"0.54634035",
"0.5452473",
"0.53599924",
"0.5354808",
"0.5313329",
"0.52591276",
"0.522024",
"0.5218829",
"0.5195488",
"0.51946217",
"0.5189172",
"0.51653963",
"0.5148892",
"0.5133105",
"0.5123282",
"0.5082277",
"0.5056665"
] | 0.67686737 | 0 |
Format a Roku Channel name. | def format_channel_name(channel_number: str, channel_name: str | None = None) -> str:
if channel_name is not None and channel_name != "":
return f"{channel_name} ({channel_number})"
return channel_number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def channel_name(radio_id: int, channel_id: int) -> str:\n return f\"COMM{radio_id} Ch {channel_id}\"",
"def channelName(self):\n channel_list = (\"Neutral\",\n \"BBC1\",\n \"BBC2\",\n \"ITV\",\n \"Channel 4\",\n \"Channel 5\")\n channel_name = channel_list[self.channel]\n return channel_name",
"def channel_name(self) -> str:\n return self._channel_name",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def channel_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"channel_name\")",
"def fmt(competitor_name: str) -> str:\n name = competitor_name.replace(\"_a\", r\" $\\alpha$ \")\n name = name.replace(\"_b\", r\" $\\beta$ \")\n return name",
"def channel_string(self, pre=\"\", full=False):\n\n return \" \".join(pre+c.get_chanstr(full=full) for c in self.channels)",
"def get_channel_name(self, channelid, isdm=False):\n\n if isdm:\n return channelid\n\n request = SimpleRequest(self.headers).request\n channel = request.grab_page('https://discordapp.com/api/%s/channels/%s' % (self.api, channelid))\n\n if channel is not None and len(channel) > 0:\n return '%s_%s' % (channelid, self.safe_name(channel['name']))\n\n else:\n error('Unable to fetch channel name from id, generating one instead.')\n return '%s_%s' % (channelid, random_str(12))",
"def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")",
"def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")",
"def delivery_channel_name(self) -> str:\n return pulumi.get(self, \"delivery_channel_name\")",
"def button_string(channel, red, blue):\n return 'CH{:s}_{:s}_{:s}'.format(channel, red, blue)",
"def char_name(character_object, verbose_where=False, watch_list=None):\n watch_list = watch_list or []\n cname = character_object.name\n if character_object in watch_list:\n cname += \"{c*{n\"\n if character_object.player_ob and character_object.player_ob.db.lookingforrp:\n cname += \"|R+|n\"\n if not verbose_where:\n return cname\n if character_object.db.room_title:\n cname += \"{w(%s){n\" % character_object.db.room_title\n return cname"
] | [
"0.78294134",
"0.7059564",
"0.6983384",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.68293834",
"0.6251904",
"0.623915",
"0.6203214",
"0.6163108",
"0.6163108",
"0.6163108",
"0.6115342",
"0.6044168"
] | 0.7868498 | 1 |
fetcher.get_projects() should return a list of projects. | def test_get_projects_returns_projects(fc: fetcher.Fetcher):
projects = fc.get_projects()
assert isinstance(projects, list)
assert isinstance(projects[0], models.Project) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects",
"def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()",
"def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response",
"def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()",
"def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()",
"def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)",
"def list_projects(self):\n data = self._run(\n url_path=\"projects/list\"\n )\n projects = data['result'].get('projects', [])\n return [self._project_formatter(item) for item in projects]",
"def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response",
"def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)",
"def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name",
"def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]",
"def test_get_projects(self):\n pass",
"def list_projects(arn=None, nextToken=None):\n pass",
"def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)",
"def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]",
"def get_projects(self):\n return conf.projects",
"def projects(self):\n ret_val = []\n params = {\"fields\": Project.FIELDS}\n projects = self._request(\"get\", \"projects\", params=params)\n\n for project in projects:\n ret_val.append(Project(project))\n\n return ret_val",
"def get_projects():\n return Project.query.all()",
"def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)",
"def get_projects(self):\n return self._gitlab.owned_projects(per_page=1000)",
"def get_projects(self):\n return self.jira.projects()",
"def get_all_projects(self, scope):\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n headers = {'X-Auth-Token': scope.auth_token}\n try:\n r = self._make_request_with_auth_fallback(url, headers)\n return r['projects']\n\n except Exception as e:\n self.warning('Unable to get projects: %s', e)\n raise e\n\n return None",
"def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])",
"def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]",
"def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result",
"def list_keystone_v3_projects(self):\n LOG_OBJ.debug(\"List the projects.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Projects list : %s \" % output)\n print (\"Projects list : %s \" % output)\n return output['projects']",
"def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))",
"def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]"
] | [
"0.8439259",
"0.8330137",
"0.81635183",
"0.790544",
"0.7656669",
"0.7634909",
"0.7559611",
"0.7557729",
"0.7551785",
"0.7523433",
"0.7494402",
"0.7446309",
"0.74364996",
"0.7427242",
"0.73962003",
"0.7385047",
"0.73531884",
"0.7350818",
"0.73503566",
"0.7344059",
"0.7321423",
"0.7311092",
"0.73055935",
"0.7295584",
"0.7294882",
"0.7290948",
"0.72558933",
"0.7221671",
"0.72190756",
"0.719097"
] | 0.862502 | 0 |
fetcher.get_projects() should be able to filter on project. | def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):
projects = fc.get_projects(test_project_name)
assert isinstance(projects, list)
assert len(projects) == 1
assert projects[0].name == test_project_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_projects(self):\n pass",
"def test_list_project_request(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_list_projects(self):\n pass",
"def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)",
"def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def get_projects():\n return Project.query.all()",
"def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)",
"def test_list_project(self):\n pass",
"def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects",
"def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)",
"def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)",
"def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)",
"def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects",
"def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])",
"def _get_projects(filters):\n # First order the objects, so separate that out\n orders_query = [o for o in filters if o['type']=='order']\n # Filter objects next, so separate those out\n filters_query = [f for f in filters if f['type']=='filter']\n\n projects = Project.objects.all()\n # We need a dictonary to pass to Django's filter function\n query_dict = {}\n # Order the projects based on the ordering queries\n for orders in orders_query:\n projects = projects.order_by(orders['property'])\n # create the dictonary based on the filtering queries\n for filters in filters_query:\n # First, if we want to filter by user, find the user\n if filters['property'] =='user':\n try:\n user_p = UserProfile.objects.get(email=filters['value'])\n query_dict[filters['property']] = user_p\n except UserProfile.DoesNotExist:\n raise Http404(\"User does not exist\")\n # Second, if the filter is by tags, change the query phrase\n # to 'tags__tag_name' - this is because tags is a ManyToManyField\n # and we want to search by the tag_name property of Tag objects\n elif filters['property'] == 'tags':\n filters['property'] = 'tags__tag_name'\n query_dict[filters['property']] = filters['value']\n else:\n # Make a dictionary, property: value, and you can pass it to filter fn\n query_dict[filters['property']] = filters['value']\n projects = projects.filter(**query_dict)\n return projects",
"def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')",
"def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects",
"def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs",
"def test_get_projects_expanded(self):\n pass",
"def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()",
"def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]",
"def get_projects(self):\n return conf.projects",
"def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)",
"def projects(self):\r\n return p.Projects(self)",
"def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)",
"def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)",
"def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li",
"def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects",
"def test_get_project(self):\n pass"
] | [
"0.740072",
"0.7351347",
"0.7287984",
"0.7287984",
"0.72771144",
"0.72143286",
"0.7165112",
"0.71590155",
"0.7144451",
"0.7075294",
"0.7046728",
"0.7021743",
"0.70181876",
"0.69926757",
"0.698769",
"0.6950364",
"0.6894256",
"0.68472075",
"0.6795028",
"0.67938405",
"0.6792199",
"0.6790819",
"0.6777117",
"0.6760707",
"0.67518044",
"0.6746114",
"0.6744328",
"0.6696477",
"0.66728646",
"0.6671634"
] | 0.829477 | 0 |
fetcher.get_models() should return a list of models. | def test_get_models_returns_models(fc: fetcher.Fetcher):
ml = fc.get_models()
assert isinstance(ml, list)
assert isinstance(ml[0], models.LookmlModel) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def models() -> list[str]:\n return list(models_url.keys())",
"def get_models(self):\n self.load()\n return self._models",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")",
"def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models",
"def get_model_list():\n with open(os.path.join(MODELS_FOLDER, \"models.json\"), \"r\") as model_file:\n model_list = json.load(model_file)\n return model_list",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models",
"def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass",
"def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")",
"def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))",
"def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"async def list_models(\n list_models_request: ListModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/list_models endpoint\")\n logging.debug(f\"Request: {list_models_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().list_model_controller(\n request=list_models_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/list_models endpoint: {error}\")\n raise error",
"def import_data(self):\n self.models = []\n for o in self.loader.load():\n klass = self.type_for(o)\n if hasattr(klass, \"from_api\"):\n self.models.append(klass.from_api(o))\n else:\n self.models.append(klass(o))\n return self.models",
"def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models",
"def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def ListModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break",
"def getModels(makeURL):\n\n #Get make page as Soup\n soup, _ = getPage(makeURL)\n\n #Check if page available\n if soup is None:\n #Not available - break\n print(\"Can't find Make URL\")\n quit()\n\n #Try to find models list\n try:\n #Find span with text \"Make\"\n span = soup.find(class_=\"srp-filter-group__filter-name\", text=\"Make\")\n #Move up two parents\n a = span.parent.parent\n #Find all filter names\n b = a.find_all(class_=\"srp-list-filter__item-link link link--no-underline\")\n models = [i['href'] for i in b]\n models = models[1:]\n except:\n print(makeURL)\n models=[]\n \n logger.debug(f\"Models include: {models}\")\n return models",
"def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models",
"def get_all_models() -> list:\n\n conn = database_connect()\n if(conn is None):\n return None\n cur = conn.cursor()\n\n try:\n # SQL statement and execute\n sql = \"\"\"SELECT manufacturer, description, modelnumber, weight\n FROM Model\"\"\"\n cur.execute(sql, ())\n\n # Attempt to fetch first row\n result = cur.fetchall()\n\n if result == None:\n cur.close()\n conn.close()\n return []\n\n models = []\n for row in result:\n models.append(\n [row[0], row[1], row[2], row[3]]\n )\n\n cur.close()\n conn.close()\n return models\n except Exception as e:\n print(\"fff\")\n print(e)\n # If login failed, return None\n cur.close()\n conn.close()\n return []",
"def generate_model_list():\n\t\n\tmodels = [\n\t\tapi.v1.models.job.Job,\n\t]\n\treturn models",
"def list_models(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['search_pattern']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Get the list of models based on the search pattern\n search_pattern = self.request_df.loc[0, 'search_pattern']\n \n # If the search pattern is empty default to all models\n if not search_pattern.strip():\n search_pattern = '*'\n \n # Get the list of models as a string\n models = \"\\n\".join([str(p).split(\"\\\\\")[-1] for p in list(pathlib.Path(self.path).glob(search_pattern))])\n \n # Prepare the output\n self.response = pd.Series(models)\n \n # Finally send the response\n return self.response",
"def ez_get_models(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_models\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)"
] | [
"0.7452906",
"0.72231185",
"0.7150281",
"0.71438277",
"0.70214987",
"0.69560665",
"0.69169444",
"0.6905092",
"0.68741965",
"0.6871369",
"0.6853938",
"0.6839921",
"0.67970765",
"0.67815137",
"0.6671083",
"0.66492426",
"0.6614827",
"0.65691483",
"0.6551473",
"0.64561534",
"0.64030915",
"0.64030915",
"0.6399482",
"0.6396188",
"0.63814276",
"0.6364001",
"0.6348248",
"0.63399523",
"0.633756",
"0.63346493"
] | 0.769774 | 0 |
fetcher.get_models() should be able to filter on project or model. | def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):
ml = fc.get_models(project=test_project_name)
assert all(m.project_name == test_project_name for m in ml)
ml = fc.get_models(model=test_model["name"])
assert all(m.name == test_model["name"] for m in ml)
ml = fc.get_models(project=test_project_name, model=test_model["name"])
assert all(
m.project_name == test_project_name and m.name == test_model["name"] for m in ml
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def models_list(request):\n projects = Project.objects.filter(models=1)\n return render(request, 'screenshower/app/models_list.html', {'projects': projects})",
"def list_models(SortBy=None, SortOrder=None, NextToken=None, MaxResults=None, NameContains=None, CreationTimeBefore=None, CreationTimeAfter=None):\n pass",
"def test_coupledmodels_get(self):\n pass",
"def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)",
"def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")",
"def models(self):\r\n return self.get_field('model')",
"def models(self):\r\n return self.get_field('model')",
"def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))",
"def list_models(self, fields=['model_name']):\n\t\tmodels = dbop.get_models(self, model_type='all', fields=fields)\n\t\treturn models",
"def test_get_models_throws_if_project_does_not_exist(\n fc: fetcher.Fetcher, project, model\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting projects.\" in str(exc.value)",
"def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)",
"def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break",
"def models() -> list[str]:\n return list(models_url.keys())",
"def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting models.\" in str(exc.value)",
"def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")",
"def get_models(automaker, year):\n\n return set([car['model'] for car in data if car['automaker'] == automaker and car['year'] == year])",
"def test_get_used_models(fc: fetcher.Fetcher, test_model):\n used_models = fc.get_used_models()\n assert isinstance(used_models, dict)\n assert len(used_models) > 0\n assert all(type(model_name) == str for model_name in used_models.keys())\n assert all(type(query_count) == int for query_count in used_models.values())\n assert test_model[\"name\"] in used_models.keys()",
"def getModels(makeURL):\n\n #Get make page as Soup\n soup, _ = getPage(makeURL)\n\n #Check if page available\n if soup is None:\n #Not available - break\n print(\"Can't find Make URL\")\n quit()\n\n #Try to find models list\n try:\n #Find span with text \"Make\"\n span = soup.find(class_=\"srp-filter-group__filter-name\", text=\"Make\")\n #Move up two parents\n a = span.parent.parent\n #Find all filter names\n b = a.find_all(class_=\"srp-list-filter__item-link link link--no-underline\")\n models = [i['href'] for i in b]\n models = models[1:]\n except:\n print(makeURL)\n models=[]\n \n logger.debug(f\"Models include: {models}\")\n return models",
"def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1",
"def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_models(automaker, year):\n return set([row[\"model\"] for row in data\n if row[\"automaker\"] == automaker and\n row[\"year\"] == year])",
"def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models",
"def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )",
"def get_models(self):\n self.load()\n return self._models"
] | [
"0.72766924",
"0.66285986",
"0.6556921",
"0.6551836",
"0.6296319",
"0.626473",
"0.6252031",
"0.61985433",
"0.61647654",
"0.61226976",
"0.61226976",
"0.607028",
"0.60465264",
"0.6035401",
"0.60337895",
"0.59634507",
"0.5959686",
"0.5941662",
"0.5922241",
"0.59054995",
"0.5881448",
"0.58800495",
"0.5876396",
"0.5874023",
"0.5873466",
"0.58549595",
"0.58486265",
"0.58384407",
"0.5836732",
"0.5836436"
] | 0.75294465 | 0 |
fetcher.get_models() should throw if a model is not found. | def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):
with pytest.raises(exceptions.NotFoundError) as exc:
fc.get_models(project=project, model=model)
assert "An error occured while getting models." in str(exc.value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)",
"def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def test_get_models_throws_if_project_does_not_exist(\n fc: fetcher.Fetcher, project, model\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting projects.\" in str(exc.value)",
"def test_get_model_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n exception_raised = False\n exception_message = \"\"\n model = None\n try:\n model = model_manager.get_model(qualified_name=\"asdf\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")",
"def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")",
"def models(self):\n \n # Call the Razor RESTful API to get a list of models\n headers = {'content-type': 'application/json'}\n r = requests.get(self.url + '/model', headers=headers)\n\n # Check the status code and return appropriately\n if r.status_code == 200:\n return json.loads(r.content)\n else:\n return 'Error in request, exited with status code:' + str(r.status_code)",
"def load_models(model_name=\"\", path=\"\", read_grains=False, **kwargs):\n models = pc.load_models('{0}{1}'.format(path, model_name), read_grains=read_grains, **kwargs)\n models_return = []\n for m in models:\n if m.n_zones > 1:\n models_return.append(m)\n else:\n # log failed model\n pass\n return models_return",
"def get_models_for_make(self, make):\n return self.get('vehicles/GetModelsForMake/{}'.format(make))",
"def get_models(self):\n self.load()\n return self._models",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def get_models(self, model_names, callback):\n models = self.get_collection('_model')\n models.find(spec={'_id': {'$in': tuple(model_names)}}).to_list(\n callback=callback)",
"def check_models():\n # Check if Models exist\n if request.method == 'GET':\n if models_exist():\n return jsonify({'message': 'models found'})\n else:\n return jsonify({'message': 'one or more models missing'}), 409\n\n # Post Method, download models\n else:\n task = download_models.apply_async()\n task_url = url_for('model_download_status', task_id=task.id)\n return jsonify({'message': 'download started', 'location': task_url}), 202, {'Location': task_url}",
"def try_models(self):\n result = os.system(\"python try_models.py\")\n return result == 0",
"def ez_get_models(auth_token, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_get_models\"\n payload = {\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)",
"def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def test_coupledmodels_get(self):\n pass",
"def get_model_by_name(self, model_name):\n models = ModelDirectory.get_model_by_name(model_name, pipeline=self)\n return models",
"def get_models(make):\n api_url = 'https://api.edmunds.com/api/vehicle/v2/{}/models?fmt=json&api_key={}'\\\n .format(make, API_KEY)\n r = requests.get(api_url).json()\n all_models = [model['name'] for model in r['models']]\n return all_models",
"def GetModels(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _retrieve_models(local=True):\n # Check if the download folder exists\n def _get_meta_data(model_name, file):\n return {\n \"data\": {\n \"id\": model_name,\n \"name\": model_name,\n \"description\": model_name,\n \"filename\": os.path.join(\n app.config[\"DOWNLOAD_DIR\"], file),\n \"created\": time.ctime(os.path.getctime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file))),\n \"modified\": time.ctime(os.path.getmtime(\n os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n file)))\n }\n }\n\n if not os.path.exists(app.config[\"DOWNLOAD_DIR\"]):\n os.makedirs(app.config[\"DOWNLOAD_DIR\"])\n\n if not local:\n # Fetch from a Nexus-hosted catalog\n resources = app.forge.search({\"type\": \"EmbeddingModel\"})\n for resource in resources:\n app.models[resource.name] = {\n \"data\": digest_model_data(resource),\n }\n app.forge.download(\n resource, \"distribution.contentUrl\",\n app.config[\"DOWNLOAD_DIR\"])\n\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"],\n resource.distribution.name)\n app.models[resource.name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n\n # Clear the downloads dir\n for f in os.listdir(app.config[\"DOWNLOAD_DIR\"]):\n try:\n os.remove(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n except Exception:\n shutil.rmtree(os.path.join(app.config[\"DOWNLOAD_DIR\"], f))\n else:\n # Fetch from a local dir\n for (_, dirs, files) in os.walk(app.config[\"DOWNLOAD_DIR\"]):\n for path in dirs + files:\n if path[0] != \".\":\n match = re.match(r\"(.*)\\.zip\", path)\n if match:\n model_name = match.groups()[0]\n else:\n model_name = path\n app.models[model_name] = _get_meta_data(model_name, path)\n pipeline_path = os.path.join(\n app.config[\"DOWNLOAD_DIR\"], path)\n app.models[model_name][\"object\"] = EmbeddingPipeline.load(\n pipeline_path,\n embedder_interface=GraphElementEmbedder,\n embedder_ext=\"zip\")\n break",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def ListModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def models() -> list[str]:\n return list(models_url.keys())",
"def test_get_model(self) -> None:\n get_model()",
"def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)",
"def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models",
"def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass",
"def test_get_model_metadata_method_with_missing_model(self):\n # arrange\n model_manager = ModelManager()\n\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n # act\n excpeption_raised = False\n exception_message = None\n try:\n model_metadata = model_manager.get_model_metadata(qualified_name=\"asdf\")\n except Exception as e:\n excpeption_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(excpeption_raised)\n self.assertTrue(exception_message == \"Instance of model 'asdf' not found in ModelManager.\")"
] | [
"0.7327567",
"0.69435924",
"0.67743224",
"0.67731684",
"0.666128",
"0.66390353",
"0.6595562",
"0.6590047",
"0.64084816",
"0.6316313",
"0.6267025",
"0.62651926",
"0.626041",
"0.6233475",
"0.6200443",
"0.61957514",
"0.61935633",
"0.61921066",
"0.61672646",
"0.61589247",
"0.6154951",
"0.6127301",
"0.6122401",
"0.6119595",
"0.60895294",
"0.6084304",
"0.6076584",
"0.60558337",
"0.60204977",
"0.60099065"
] | 0.75194955 | 0 |
fetcher.get_used_models() should return models that have queries against them. | def test_get_used_models(fc: fetcher.Fetcher, test_model):
used_models = fc.get_used_models()
assert isinstance(used_models, dict)
assert len(used_models) > 0
assert all(type(model_name) == str for model_name in used_models.keys())
assert all(type(query_count) == int for query_count in used_models.values())
assert test_model["name"] in used_models.keys() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def availablemodels(self):\n return self.__models.keys()",
"def _get_models(self, req, is_detail):\n context = req.environ['meteos.context']\n\n search_opts = {}\n search_opts.update(req.GET)\n\n # Remove keys that are not related to model attrs\n search_opts.pop('limit', None)\n search_opts.pop('offset', None)\n sort_key = search_opts.pop('sort_key', 'created_at')\n sort_dir = search_opts.pop('sort_dir', 'desc')\n\n models = self.engine_api.get_all_models(\n context, search_opts=search_opts, sort_key=sort_key,\n sort_dir=sort_dir)\n\n limited_list = common.limited(models, req)\n\n if is_detail:\n models = self._view_builder.detail_list(req, limited_list)\n else:\n models = self._view_builder.summary_list(req, limited_list)\n return models",
"def get_available_models():\n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and item.name.endswith(\".py\"))\n return models",
"def get_models(self):\n self.load()\n return self._models",
"def fetch_models(self):\n if self.model_pool is None:\n print(\"Please train a model first.\", file=STDE)\n EXIT(1)\n else:\n return [copy.deepcopy(m.steps[-1][-1]) for m in self.model_pool]",
"def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache",
"def get_available_entities_models():\n return ['concat', 'bahdanau', 'luong']",
"def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query",
"def get_models(self, app_name):\n try:\n models = list(apps.get_app_config(app_name).get_models())\n return models\n except:\n raise LookupError(f\"this is no such app {app_name}\")",
"def test_coupledmodels_get(self):\n pass",
"def test_get_models_returns_models(fc: fetcher.Fetcher):\n ml = fc.get_models()\n assert isinstance(ml, list)\n assert isinstance(ml[0], models.LookmlModel)",
"def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:\n pass",
"def GetModels(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def models(self):\n return self.config.models()",
"def pending_models(self):\n return self._pending_models",
"def requires_model_loading(self):\n return self.requires_loaded_models",
"def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models",
"def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models",
"def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models",
"def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n list_of_models = []\n for subclass in cls.__subclasses__():\n subclass_models = subclass.list_available_models()\n if subclass_models is not None and len(subclass_models) > 0:\n list_of_models.extend(subclass_models)\n return list_of_models",
"def opt_get_all_models_rest_api():\n return retrieve_all_models()",
"def list_available_models(cls) -> 'List[PretrainedModelInfo]':\n # recursively walk the subclasses to generate pretrained model info\n list_of_models = model_utils.resolve_subclass_pretrained_model_info(cls)\n return list_of_models",
"def list_models():\n\tclsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n\tverif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)\n\tfit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]\n\treturn fit_models",
"def get_live_tracked_models(self, model_class):\n return self.update_models[model_class] + self.create_models[model_class]",
"def get_models():\n\n from lmfit.models import lmfit_models\n models = lmfit_models\n if 'Expression' in models:\n del models['Expression']\n if 'Gaussian-2D' in models:\n del models['Gaussian-2D']\n\n filenames = set()\n\n models_path = pkg_resources.resource_filename('nexpy.api.frills',\n 'models')\n sys.path.append(models_path)\n for file_ in os.listdir(models_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n private_path = os.path.join(os.path.expanduser('~'), '.nexpy', 'models')\n if os.path.isdir(private_path):\n sys.path.append(private_path)\n for file_ in os.listdir(private_path):\n name, ext = os.path.splitext(file_)\n if name != '__init__' and ext.startswith('.py'):\n filenames.add(name)\n\n for name in sorted(filenames):\n try:\n module = importlib.import_module(name)\n models.update(dict((n.strip('Model'), m)\n for n, m in inspect.getmembers(module,\n inspect.isclass)\n if issubclass(m, Model) and n != 'Model'))\n except ImportError:\n pass\n\n return models",
"def get_supported_models(self):\n # type: () -> list\n return [model for model in self.__MODELS]",
"def models(self):\n return self._base.classes",
"def models() -> list[str]:\n return list(models_url.keys())",
"def models(self, model=None):\n for query in self.__queries:\n if isinstance(query, orb.Query):\n yield query.model(model)\n else:\n for model in query.models(model):\n yield model",
"def peak_all_models(self) -> List:\n models = list(self.meta.name)\n print(models)\n return models"
] | [
"0.70178705",
"0.66382486",
"0.6534866",
"0.631813",
"0.63044095",
"0.6285215",
"0.61792207",
"0.6041896",
"0.60361505",
"0.6007675",
"0.592958",
"0.5920479",
"0.58479476",
"0.5833444",
"0.58288264",
"0.5817674",
"0.58034897",
"0.58034897",
"0.58034897",
"0.58034897",
"0.57479924",
"0.5734192",
"0.5726884",
"0.57234335",
"0.57156134",
"0.5707454",
"0.56922764",
"0.5678311",
"0.5645432",
"0.56387645"
] | 0.7987806 | 0 |
fetcher.get_explores() should return a list of explores. | def test_get_explores(fc: fetcher.Fetcher):
explores = fc.get_explores()
assert isinstance(explores, list)
assert len(explores) > 0
assert isinstance(explores[0], models.LookmlModelExplore) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)",
"def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)",
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def explores(self, explores):\n\n self._explores = explores",
"def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list",
"def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']",
"def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results",
"def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields",
"def _get_exchanges(token: str) -> List[mtypes.Exchange]:\n _LOG.info(\"Getting exchanges from API ...\")\n response = get_client().service.ExchangeList(Token=token)\n\n exchanges = [\n mtypes.Exchange.from_dict(d=obj)\n for obj in zeep.helpers.serialize_object(response.EXCHANGES[\"EXCHANGE\"])\n ]\n _LOG.info(\"Got %s exchanges\", len(exchanges))\n return exchanges",
"def get_option_expirations(symbol: str, source: str = \"Nasdaq\") -> list:\n source = re.sub(r\"\\s+\", \"\", source.lower())\n output = []\n if source == \"tradier\":\n output = tradier_model.option_expirations(symbol)\n if source == \"yahoofinance\":\n output = yfinance_model.option_expirations(symbol)\n if source == \"nasdaq\":\n output = nasdaq_model.option_expirations(symbol)\n if source == \"intrinio\":\n output = intrinio_model.get_expiration_dates(symbol)\n\n if not output:\n logger.info(\"Invalid Source or Symbol\")\n console.print(\"Invalid Source or Symbol\")\n return []\n\n return output",
"def get(self) -> list:\n return self.__expedition",
"def expenses(self):\n\n return Expenses.objects.filter(\n house=self.house,\n )",
"def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]",
"def expiry_dates(self):\n try:\n return self._expiry_dates\n except AttributeError:\n # has to be a non-valid date, to trigger returning 'expirations'\n d = self._load_data(dt.datetime(2016, 1, 3))\n self._expiry_dates = [dt.date(x['y'], x['m'], x['d'])\n for x in d['expirations']]\n return self._expiry_dates",
"def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]",
"def getExpAccessions(cf):\n\tplatform = cf.get_parameter('platform')\n\tsrafetchxml = cf.get_input('srafetchxml')\n\tsraexplist = cf.get_output('sraexplist')\n\tsraxmlparser = SRAXMLParser()\n\truns = sraxmlparser.parse(srafetchxml)\n\twriter = csv.writer(open(sraexplist, 'wb'), quoting=csv.QUOTE_NONE)\n\twriter.writerow(['NCBISRAExpID'])\n\taccessions = []\n\tfor run in runs:\n\t\tif platform and \\\n\t\t\tnot run.platform == platform:\n\t\t\tcontinue\n\t\telif not run.exp_accession in accessions:\n\t\t\twriter.writerow([run.exp_accession])\n\t\t\taccessions.append(run.exp_accession)\n\tcf.write_log(\"GetExpAccessions: wrote %s experiment accessions\" % len(accessions))\n\treturn constants.OK",
"def _getExponentialValues(self, arr):\r\n return [math.exp(val) for val in arr]",
"def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)",
"def get_artifacts(token, artifact_names, start, end):\n\n artifacts = []\n page = 1\n retry_limit = 3\n while True:\n req = Request(URL + f\"&page={page}\")\n req.add_header(\"Accept\", \"application/vnd.github.v3+json\")\n req.add_header(\"Authorization\", f\"token {token}\")\n with urlopen(req) as r:\n # Handle hitting the GitHub rate limit\n # If the reset time is < 90s in the future, wait for it (trying 3 times)\n # Otherwise raise an error\n if r.status == 403:\n try:\n reset = int(r.headers.get(\"X-RateLimit-Reset\"))\n except:\n raise RuntimeError(\"Hit GitHub rate limit. Reset header missing.\")\n if retry_limit == 0 or time.time() > reset or reset - time.time() > 90:\n raise RuntimeError(\"Hit GitHub rate limit. Reset is at %s\" % time.ctime(reset))\n\n # Try waiting until after the reset time\n time.sleep(10 + (reset - time.time()))\n retry_limit = retry_limit - 1\n continue\n\n if r.status != 200:\n raise RuntimeError(\"Error (%d) with API request: %s\" % (r.status, str(r)))\n\n data = json.load(r)\n\n # Only include the artifacts within the date range and names\n for a in data[\"artifacts\"]:\n if a[\"name\"] not in artifact_names:\n continue\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n if start <= updated_at <= end:\n artifacts.append(a)\n\n if len(data[\"artifacts\"]) < 100:\n break\n\n # There are more results, get the next page\n page = page + 1\n\n # Avoid hitting per-second rate limits\n time.sleep(2)\n\n return sorted(artifacts, key=lambda x: x[\"updated_at\"])",
"def experiences(self):\n return self.client.call('GET',\n self.name + 'experiences')",
"def get_option_expirations(\n self, symbol: str, include_all_roots: bool = None, strikes: str = None\n ) -> List[date]:\n url = \"/v1/markets/options/expirations\"\n params = {\n \"symbol\": symbol,\n \"includeAllRoots\": include_all_roots,\n \"strikes\": strikes,\n }\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.expirations.date",
"def return_expenses():\r\n g.db.execute(\"SELECT * FROM monthly_data ORDER BY Sr\")\r\n rows = g.db.fetchall()\r\n data = []\r\n for x in rows:\r\n data.append({'sr':x[0],'name':x[1], 'id':x[2], 'item':x[3], 'price':x[5], 'date':x[4]})\r\n return jsonify(data)",
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])",
"def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)",
"def get_scans_list(server_url, exp_no, return_list=False):\n if server_url.endswith('/') is False:\n server_url = '%s/' % server_url\n data_dir_url = '%sexp%d/Datafiles' % (server_url, exp_no)\n\n does_exist, raw_lines = check_url(data_dir_url, read_lines=True)\n if does_exist is False:\n return \"Experiment %d's URL %s cannot be found.\" % (exp_no, data_dir_url)\n\n # Scan through the index page\n scan_list = []\n header = 'HB3A_exp%04d_scan' % exp_no\n for line in raw_lines:\n if line.count(header) > 0:\n # try to find file HB3A_exp0123_scan6789.dat\n term = line.split(header)[1].split('.dat')[0]\n scan = int(term)\n # check\n if '%04d' % scan == term:\n scan_list.append(scan)\n # END_FOR\n scan_list = sorted(scan_list)\n if return_list is True:\n return scan_list\n\n message = 'Experiment %d: Scan from %d to %d' % (exp_no, scan_list[0], scan_list[-1])\n\n return message",
"def get_exchanges():\n url = 'https://help.yahoo.com/kb/finance-for-web/SLN2310.html?impressions=true'\n dataframes = pd.read_html(url)\n return dataframes[0]",
"def ListArtifacts(context=None):\n args = artifact_pb2.ApiListArtifactsArgs()\n\n items = context.SendIteratorRequest(\"ListArtifacts\", args)\n return utils.MapItemsIterator(\n lambda data: Artifact(data=data, context=context), items)",
"def _CheckExpirations(file_objs):\n expired = []\n unexpired = []\n for file_obj in file_objs:\n if _IsExpired(file_obj):\n expired.append(file_obj)\n else:\n unexpired.append(file_obj)\n return expired, unexpired"
] | [
"0.6731509",
"0.6563917",
"0.6551571",
"0.63968503",
"0.63752097",
"0.5857632",
"0.57382774",
"0.5567012",
"0.54315007",
"0.54032236",
"0.5390805",
"0.53766435",
"0.52719086",
"0.52510387",
"0.52280146",
"0.52221656",
"0.5203557",
"0.5185477",
"0.5180662",
"0.5156471",
"0.5141725",
"0.5137997",
"0.5133569",
"0.51140064",
"0.51082355",
"0.5100608",
"0.5095252",
"0.5072092",
"0.50504905",
"0.49986422"
] | 0.7531671 | 0 |
fetcher.get_explores() should be able to filter on model and/or explore. | def test_get_explores_filters(fc: fetcher.Fetcher):
explores = fc.get_explores(model="henry_dusty")
assert all(e.model_name == "henry_dusty" for e in explores)
explores = fc.get_explores(model="henry_qa", explore="explore_2_joins_all_used")
assert all(
e.model_name == "henry_qa" and e.name == "explore_2_joins_all_used"
for e in explores
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields",
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)",
"def test_get_explores_throws_if_model_or_explore_does_not_exist(\n fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str\n):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_explores(model=model, explore=explore)\n assert msg in str(exc.value)",
"def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])",
"def get_queryset(self):\n user = self.request.user\n expenses = Expense.objects.filter(\n Q(userexpense__in=user.userexpense_set.all())\n | Q(group__in=user.group_set.all()))\n\n if self.request.query_params.get('q', None) is not None:\n expenses = expenses.filter(\n description__icontains=self.request.query_params.get(\n 'q', None))\n return expenses",
"def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]",
"def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]",
"def refresh_index_page_filter_by(request, exposure_sequence):",
"def explore_view(request):\r\n # explore items\r\n user = request.user.userprofile\r\n items = Item.objects.explore(user)\r\n context = {'items':items}\r\n return render(request, 'explore/explore.html', context)",
"def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)",
"def get_list(self, resp):\n expenses = resp['expenses']\n expense_list = ExpenseList()\n for value in expenses:\n expense = Expense()\n expense.set_expense_id(value['expense_id'])\n expense.set_date(value['date'])\n expense.set_account_name(value['account_name'])\n expense.set_paid_through_account_name(value[\\\n 'paid_through_account_name'])\n expense.set_description(value['description'])\n expense.set_currency_id(value['currency_id'])\n expense.set_currency_code(value['currency_code'])\n expense.set_bcy_total(value['bcy_total'])\n expense.set_total(value['total'])\n expense.set_is_billable(value['is_billable'])\n expense.set_reference_number(value['reference_number'])\n expense.set_customer_id(value['customer_id'])\n expense.set_customer_name(value['customer_name'])\n expense.set_vendor_id(value['vendor_id'])\n expense.set_vendor_name(value['vendor_name'])\n expense.set_status(value['status'])\n expense.set_created_time(value['created_time'])\n expense.set_expense_receipt_name(value['expense_receipt_name'])\n expense_list.set_expenses(expense)\n page_context_obj = PageContext()\n page_context = resp['page_context']\n page_context_obj.set_page(page_context['page'])\n page_context_obj.set_per_page(page_context['per_page'])\n page_context_obj.set_has_more_page(page_context['has_more_page'])\n page_context_obj.set_report_name(page_context['report_name'])\n page_context_obj.set_applied_filter(page_context['applied_filter'])\n page_context_obj.set_sort_column(page_context['sort_column'])\n page_context_obj.set_sort_order(page_context['sort_order'])\n expense_list.set_page_context(page_context)\n\n return expense_list",
"def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)",
"def validate(self, mode: QueryMode = \"batch\") -> Dict[str, Any]:\n self._query_by_task_id = {}\n explore_count = self._count_explores()\n printer.print_header(\n f\"Testing {explore_count} \"\n f\"{'explore' if explore_count == 1 else 'explores'} \"\n f\"[{mode} mode] \"\n f\"[concurrency = {self.query_slots}]\"\n )\n\n self._create_and_run(mode)\n if mode == \"hybrid\" and self.project.errored:\n self._create_and_run(mode)\n\n for model in sorted(self.project.models, key=lambda x: x.name):\n for explore in sorted(model.explores, key=lambda x: x.name):\n message = f\"{model.name}.{explore.name}\"\n printer.print_validation_result(\n passed=not explore.errored, source=message\n )\n\n return self.project.get_results(mode)",
"def get_images_by_vulnerability(self, **kwargs):\n ...",
"def get_recommendations(artists = tuple(), genres = tuple(), limit = 20, features = True, client = None):\n\n recs = client.recommendations(seed_artists = artists, seed_genres = genres, limit = limit)\n tracks = recs['tracks']\n\n # TODO: need a compose function...\n to_keep = (\n 'album_name', 'artist_name', 'name', 'popularity', 'duration_ms',\n 'explicit', 'id'\n )\n rows = list(map(row_filter(to_keep, False), map(_hoist_track_info, tracks)))\n out = pd.DataFrame(rows)\n\n track_ids = [row['id'] for row in rows]\n if features:\n extra_cols = ['uri', 'type', 'duration_ms', 'analysis_url', 'track_href']\n return out.merge(\n get_track_features(track_ids).drop(columns = extra_cols),\n on = \"id\"\n )\n\n return out",
"def get_public_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_public_explorations()]",
"def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)",
"def related_view_filter():\n pass",
"def get_recommendations(self):\n endpoints = '/user/recs'\n return self.get_request(endpoints)",
"def test_get_scored_recommendations_post(self):\n pass",
"def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]",
"def list_extractors(age_limit=None):\n return [ie() for ie in list_extractor_classes(age_limit)]",
"def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Histories.objects.filter(user=user, is_used=True)\n\n return queryset",
"def index( self, trans, **kwd ):\n # Example URL: http://localhost:9009/api/repository_revisions\n repository_metadata_dicts = []\n # Build up an anded clause list of filters.\n clause_list = []\n # Filter by downloadable if received.\n downloadable = kwd.get( 'downloadable', None )\n if downloadable is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.downloadable == util.string_as_bool( downloadable ) )\n # Filter by malicious if received.\n malicious = kwd.get( 'malicious', None )\n if malicious is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.malicious == util.string_as_bool( malicious ) )\n # Filter by tools_functionally_correct if received.\n tools_functionally_correct = kwd.get( 'tools_functionally_correct', None )\n if tools_functionally_correct is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.tools_functionally_correct == util.string_as_bool( tools_functionally_correct ) )\n # Filter by missing_test_components if received.\n missing_test_components = kwd.get( 'missing_test_components', None )\n if missing_test_components is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.missing_test_components == util.string_as_bool( missing_test_components ) )\n # Filter by do_not_test if received.\n do_not_test = kwd.get( 'do_not_test', None )\n if do_not_test is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.do_not_test == util.string_as_bool( do_not_test ) )\n # Filter by includes_tools if received.\n includes_tools = kwd.get( 'includes_tools', None )\n if includes_tools is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.includes_tools == util.string_as_bool( includes_tools ) )\n # Filter by test_install_error if received.\n test_install_error = kwd.get( 'test_install_error', None )\n if test_install_error is not None:\n clause_list.append( trans.model.RepositoryMetadata.table.c.test_install_error == util.string_as_bool( test_install_error ) )\n # Filter by skip_tool_test if received.\n skip_tool_test = kwd.get( 'skip_tool_test', None )\n if skip_tool_test is not None:\n skip_tool_test = util.string_as_bool( skip_tool_test )\n skipped_metadata_ids_subquery = select( [ trans.app.model.SkipToolTest.table.c.repository_metadata_id ] )\n if skip_tool_test:\n clause_list.append( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) )\n else:\n clause_list.append( not_( trans.model.RepositoryMetadata.id.in_( skipped_metadata_ids_subquery ) ) )\n # Generate and execute the query.\n try:\n query = trans.sa_session.query( trans.app.model.RepositoryMetadata ) \\\n .filter( and_( *clause_list ) ) \\\n .order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \\\n .all()\n for repository_metadata in query:\n repository_metadata_dict = repository_metadata.get_api_value( view='collection',\n value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n repository_metadata_dicts.append( repository_metadata_dict )\n return repository_metadata_dicts\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in index: \" + str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message",
"def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])",
"def explores(self, explores):\n\n self._explores = explores",
"def expense_history(request):\n qs: QuerySet = Expense.objects.by_user(request.user.id)\n file_title: str = \"Latest_150_Expenses\"\n form = ExpenseHistory(request.GET)\n if form.is_valid():\n cd: dict = form.cleaned_data\n target: str = cd[\"target\"]\n user_id = request.user.id\n if target == \"date\":\n qs = Expense.objects.filter(date=cd[\"date1\"], user_id=user_id)\n file_title = f'For_{cd[\"date1\"]}'\n elif target == \"each_month\":\n qs = Expense.objects.filter(date__month=cd[\"month\"], user_id=user_id)\n file_title = f\"Every_{calendar.month_name[cd['month']]}_Month\"\n elif target == \"months\":\n qs = Expense.objects.last_n_months_expense(cd[\"p_months\"], user_id)\n file_title = f\"Last_{cd['p_months']}_months\"\n elif target == \"month\":\n qs = Expense.objects.month_expense(cd[\"month\"], cd[\"year\"], user_id)\n file_title = f'For_{calendar.month_name[cd[\"month\"]]}-{cd[\"year\"]}'\n elif target == \"year\":\n qs = Expense.objects.year_expense(cd[\"year\"], user_id)\n file_title = f\"{cd['year']}\"\n elif target == \"between\":\n qs = Expense.objects.filter(date__gte=cd[\"date1\"], date__lte=cd[\"date2\"],\n user__id=user_id)\n file_title = f'Between_{cd[\"date1\"]}_{cd[\"date2\"]}'\n qs = qs.order_by(\"-date\", \"-id\").values_list(\n \"date\", \"description\", \"category__name\", \"method\", \"app\", \"amount\",\n )\n if not form.is_valid():\n qs = qs[:150]\n qs_list = []\n if qs:\n for q in qs:\n qs_list.append([\n q[0], q[1], q[2], METHOD_DICT[q[3]], APP_DICT.get(q[4], \"Other\"), q[5]\n ])\n file_title = f\"{date.today()}_\" + file_title\n return render(request, \"tracker/history.html\",\n {\"qs\": qs_list, \"file_title\": file_title, \"form\": form})"
] | [
"0.753411",
"0.6666877",
"0.6630365",
"0.63271785",
"0.6167889",
"0.5833313",
"0.55136293",
"0.53865874",
"0.53228235",
"0.53056663",
"0.5212486",
"0.51720136",
"0.51571435",
"0.5055647",
"0.50480455",
"0.50150645",
"0.49779034",
"0.49160555",
"0.49039322",
"0.4895284",
"0.4848704",
"0.482605",
"0.4824581",
"0.48242378",
"0.48210782",
"0.48201323",
"0.48130566",
"0.4805133",
"0.47655815",
"0.47494656"
] | 0.822642 | 0 |
fetcher.get_explores() should throw if an explore/model is not found. | def test_get_explores_throws_if_model_or_explore_does_not_exist(
fc: fetcher.Fetcher, model: Optional[str], explore: Optional[str], msg: str
):
with pytest.raises(exceptions.NotFoundError) as exc:
fc.get_explores(model=model, explore=explore)
assert msg in str(exc.value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)",
"def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields",
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]",
"def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])",
"def validate(self, mode: QueryMode = \"batch\") -> Dict[str, Any]:\n self._query_by_task_id = {}\n explore_count = self._count_explores()\n printer.print_header(\n f\"Testing {explore_count} \"\n f\"{'explore' if explore_count == 1 else 'explores'} \"\n f\"[{mode} mode] \"\n f\"[concurrency = {self.query_slots}]\"\n )\n\n self._create_and_run(mode)\n if mode == \"hybrid\" and self.project.errored:\n self._create_and_run(mode)\n\n for model in sorted(self.project.models, key=lambda x: x.name):\n for explore in sorted(model.explores, key=lambda x: x.name):\n message = f\"{model.name}.{explore.name}\"\n printer.print_validation_result(\n passed=not explore.errored, source=message\n )\n\n return self.project.get_results(mode)",
"def fetch_gene_descriptions(self, metrics, coeff='cohen', nih_fetch_num=20, alpha=.05, **kwargs):\n if 'verbose' not in kwargs:\n kwargs['verbose'] = True\n if 'nih_dl' not in kwargs:\n kwargs['nih_dl'] = False\n\n if kwargs['nih_dl'] == False:\n if 'csv_path' not in kwargs:\n raise ValueError(\"'csv_path' argument in **kwargs missing; provide argument or use 'nih_dl':True .\")\n\n top_genes = []\n if coeff == 'cohen':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, list):\n raise ValueError(\"list passed with coeff='cohen'; if you want to use Spearman's Rho use coeff='spearman'.\")\n\n for rec in metrics['results'][:nih_fetch_num]:\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(rec.entrez))\n else:\n gene_dat = get_local_gene_info(kwargs['csv_path'], [rec.entrez])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((rec.entrez, rec.cohen_d, rec.p_value, gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha / float(metrics['gene_sample_size']))\n for eid, coh_d, p_val, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (p = %.3E; d = %.3f): < No description found >\\n\\n\" % (eid, p_val, coh_d)\n else:\n print \"%d (p = %.3E; d = %.3f): %s\\n\\n\" % (eid, p_val, coh_d, descr)\n elif coeff == 'spearman':\n # Checking if users mixed up coeff/metric parameters\n if isinstance(metrics, dict):\n raise ValueError(\"dict passed with coeff='spearman'; if you want to use Cohen's d use coeff='cohen'.\")\n\n top_ids = np.argsort(metrics)[:nih_fetch_num]\n\n top_rs = [r for r in reversed(np.sort(metrics))][:nih_fetch_num]\n top_genes = []\n for x in xrange(len(top_ids)):\n try:\n if kwargs['nih_dl']:\n gene_name, gene_description = gene_info(str(top_ids[x]))\n else:\n gene_dat = get_gene_info(kwargs['csv_path'], [top_ids[x]])\n gene_name = gene_dat[0].name\n gene_description = gene_dat[0].description\n top_genes.append((int(top_ids[x]), top_rs[x], gene_name, gene_description))\n except IndexError:\n continue\n\n if kwargs['verbose']:\n print \"\\nCorrected Bonferroni Alpha: %.3E\\n\\n\" % (alpha/float(len(self.no.ge.keys())))\n for eid, rho, gene_i, descr in top_genes:\n if len(descr) == 1:\n print \"%d (r = %.3f): < No description found >\\n\\n\" % (eid, rho)\n else:\n print \"%d (r = %.3f): %s\\n %s\\n\\n\" % (eid, rho, gene_i, descr)\n\n else:\n raise ValueError(\"Invalid parameter value for 'coeff'; use 'spearman' or 'cohen'.\")\n\n return top_genes",
"def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )",
"def test_obtain_issues_http_error(self, mock_url_read):\n mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)",
"def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)",
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def test_obtain_issues_no_query(self, mock_url_read):\n mock_url_read.side_effect = \\\n [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}', '<CxXMLResults />']\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)",
"def get_exploration_by_id(exploration_id, strict=True):\n exploration_memcache_key = _get_exploration_memcache_key(exploration_id)\n memcached_exploration = memcache_services.get_multi(\n [exploration_memcache_key]).get(exploration_memcache_key)\n\n if memcached_exploration is not None:\n return memcached_exploration\n else:\n exploration_model = exp_models.ExplorationModel.get(\n exploration_id, strict=strict)\n if exploration_model:\n exploration = exp_domain.Exploration(exploration_model)\n memcache_services.set_multi({\n exploration_memcache_key: exploration})\n return exploration\n else:\n return None",
"async def get_model_evaluation(\n get_model_evaluation_request: DescriptionModels,\n token: str = Depends(oauth2_scheme),\n):\n try:\n logging.info(\"Calling /gcp/automl/get_model_evaluation endpoint\")\n logging.debug(f\"Request: {get_model_evaluation_request}\")\n if decodeJWT(token=token):\n response = ManageModelController().get_model_evaluation_controller(\n request=get_model_evaluation_request\n )\n return response\n else:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid access token\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n except Exception as error:\n logging.error(f\"Error in /gcp/automl/get_model_evaluation endpoint: {error}\")\n raise error",
"def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count",
"def test_obtain_issues(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='High')]\n\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n\n self.assertIsInstance(issues, List)\n self.assertIsInstance(issues[0], Checkmarx.Issue)\n self.assertEqual('JScript Vulnerabilities', issues[0].group)\n self.assertEqual('Reflected XSS', issues[0].title)\n self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'\n 'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)\n self.assertEqual(1, issues[0].count)\n self.assertEqual(\"Recurrent\", issues[0].status)",
"def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results",
"def test_get_models_throws_if_model_does_not_exist(fc: fetcher.Fetcher, project, model):\n with pytest.raises(exceptions.NotFoundError) as exc:\n fc.get_models(project=project, model=model)\n assert \"An error occured while getting models.\" in str(exc.value)",
"def lookml_model_explore_with_http_info(self, lookml_model_name, explore_name, **kwargs):\n\n all_params = ['lookml_model_name', 'explore_name', 'fields']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method lookml_model_explore\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'lookml_model_name' is set\n if ('lookml_model_name' not in params) or (params['lookml_model_name'] is None):\n raise ValueError(\"Missing the required parameter `lookml_model_name` when calling `lookml_model_explore`\")\n # verify the required parameter 'explore_name' is set\n if ('explore_name' not in params) or (params['explore_name'] is None):\n raise ValueError(\"Missing the required parameter `explore_name` when calling `lookml_model_explore`\")\n\n\n collection_formats = {}\n\n resource_path = '/lookml_models/{lookml_model_name}/explores/{explore_name}'.replace('{format}', 'json')\n path_params = {}\n if 'lookml_model_name' in params:\n path_params['lookml_model_name'] = params['lookml_model_name']\n if 'explore_name' in params:\n path_params['explore_name'] = params['explore_name']\n\n query_params = {}\n if 'fields' in params:\n query_params['fields'] = params['fields']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='LookmlModelExplore',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))",
"def get_viewable_explorations(user_id):\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_viewable_explorations(user_id)]",
"def unfound_entities(token, path='', file='result.csv', save=False, chunk_size=128):\n\n headers = {\n 'accept': 'text/csv',\n }\n\n try:\n response = requests.get(\n 'https://reactome.org/AnalysisService/download/%s/entities/notfound/%s' % (token, file),\n headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n if save:\n with open(\"\".join([path, file]), 'wb') as f:\n for chunk in response.iter_content(chunk_size=chunk_size):\n f.write(chunk)\n else:\n gene_list = response.text.split('\\n')\n df_list = [row.split(\",\") for row in gene_list[:-1]]\n df = pandas.DataFrame(df_list)\n df = df.iloc[1:]\n return df\n else:\n print('Status code returned a value of %s' % response.status_code)",
"def test_fetch_no_results():\n url = (\n \"https://gliders.ioos.us/erddap/search/index.csv?page=1&itemsPerPage=100000&searchFor\"\n '=\"incredibly_long_string_that_should_never_match_a_real_dataset\" '\n )\n key = \"ioos\"\n data = fetch_results(url, key)\n assert data is None",
"def get_public_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_public_explorations()]",
"def _fetch(self):\n self._data = self._get(self.url)\n\n if self._data['released_errata'] is not None:\n self._released_errata = Erratum(errata_id=self._data[\n 'released_errata']['id'])\n\n for errata_dict in self._data['all_errata']:\n errata = Erratum(errata_id=errata_dict['id'])\n self._all_errata.append(errata)\n\n self._signed_rpms = self._data.get('rpms_signed')\n\n for et_file in self._data['files']:\n self._files.append(et_file['path'])",
"def fetch_object(url):\n print(' GET ' + url)\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=15)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n r = session.get(url)\n # Covering internal server errors by retrying one more time\n if r.status_code == 500:\n time.sleep(5)\n r = requests.get(url, allow_redirects=True)\n elif r.status_code != 200:\n print(f\"Problem with request: {str(r)}\")\n raise RuntimeError(\"Non-200 status code\")\n return r",
"def explores(self, explores):\n\n self._explores = explores"
] | [
"0.79972434",
"0.6808644",
"0.64591634",
"0.6081574",
"0.6081095",
"0.6056974",
"0.5249449",
"0.5153191",
"0.5144307",
"0.5057118",
"0.49663934",
"0.49415502",
"0.4938596",
"0.49360746",
"0.49263144",
"0.4916875",
"0.49087682",
"0.48916966",
"0.48450214",
"0.4824901",
"0.48051685",
"0.48000914",
"0.47367904",
"0.47179312",
"0.46975544",
"0.4694692",
"0.46923035",
"0.46534842",
"0.46450973",
"0.4628136"
] | 0.73947066 | 1 |
fetcher.get_used_explores() should return all used explores. | def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):
used_explores = fc.get_used_explores(model=test_model["name"])
assert isinstance(used_explores, dict)
assert all(e in test_used_explore_names for e in used_explores) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):\n unused_explores = fc.get_unused_explores(model=test_model[\"name\"])\n assert all(e in test_unused_explores for e in unused_explores)",
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']",
"def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])",
"def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results",
"def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)",
"def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons",
"def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))",
"def popular_items(self):\n if self._popular_items is None:\n self._popular_items = self._get_popular_items(100)\n return self._popular_items",
"def test_get_list_most_expensive(self):\n\n expensive_goods_test = self.info_list.get_list_most_expensive()\n most_expensive_test = self.form_expensive_list_goods()\n\n self.assertEqual(expensive_goods_test, most_expensive_test)",
"def get_malware_used_by_groups():\n global malware_used_by_groups\n\n if not malware_used_by_groups:\n malware_used_by_groups = rsh.malware_used_by_groups(get_srcs())\n\n return malware_used_by_groups",
"def _count_explores(self) -> int:\n explore_count = 0\n for model in self.project.models:\n explore_count += len(model.explores)\n return explore_count",
"def unused_evals(self):\n\t\treturn self.Evals - self.nFES",
"def get_low_use_instances(self):\n response = self.support.describe_trusted_advisor_check_result(checkId=LOW_USE_CHECK_ID, language='en')\n if 'result' in response:\n return response['result'].get('flaggedResources', [])",
"def sitetotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._sitetotalrequests\n\t\texcept Exception as e:\n\t\t\traise e",
"def explores(self, explores):\n\n self._explores = explores",
"def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]",
"def get_low_use_instances(self):\n return self.low_use.batch_get_item(EmailSent=True)",
"def instances_used(self):\n return None",
"def read_used():\n used_hashes = {\"evs\": set([]),\n \"cache\": set([]),\n \"seeds\": set([])}\n\n with open(LOG_FILEPATH, 'rb') as logfile:\n for line in logfile.readlines():\n kind, hash = tuple(line.split('...'))\n used_hashes[kind].add(hash.rstrip())\n\n return used_hashes",
"def _get_global_popular_resources_uris(self,\n num_entries: int,\n resource_type: ResourceType = ResourceType.Table) -> List[str]:\n LOGGER.info('Querying global popular resources URIs')\n\n num_readers = app.config['POPULAR_RESOURCES_MINIMUM_READER_COUNT']\n\n relation_model = resource_relation_model[resource_type][UserResourceRel.read]\n res_key = f'{resource_type.name.lower()}_rk'\n res_attr = getattr(relation_model, res_key)\n user_attr = getattr(relation_model, 'user_rk')\n read_count_attr = getattr(relation_model, 'read_count')\n\n with self.client.create_session() as session:\n readers = func.count(user_attr).label('readers')\n usage_subquery = session.query(\n res_attr.label('res_key'),\n readers,\n func.sum(read_count_attr).label('total_reads')\n ).group_by(res_attr).having(readers >= num_readers).subquery()\n\n popular_usage = session.query(usage_subquery.c.res_key).order_by(\n (usage_subquery.c.readers * func.log(usage_subquery.c.total_reads)).desc()\n ).limit(num_entries).all()\n\n return [usage.res_key for usage in popular_usage]",
"def form_expensive_list_goods(self): \n\n self.database.truncate_all_tables()\n\n self.database.add(GoodInfo(\"рыба мороженая, Кета 1кг\", \n \"400\", \"5\", \"2020-12-30\", \"90\", \"2020-12-30\"))\n \n most_expensive_test_list = self.database.get_all_goods()\n\n\n return most_expensive_test_list",
"def test_tags_recently_used_count(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n tags = po.get_recently_used_tags()\n assert len(tags) <= 25, \\\n \"# tags is %s, which is greater than 25\" % (len(tags))",
"def used_by(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n if 'used_by' in self.se.full_class_only_graph.nodes[self.uri]:\n response = self.se.full_class_only_graph.nodes[self.uri]['used_by']\n result = restructure_output(self,\n response,\n inspect.stack()[0][3],\n self.output_type)\n return result\n else:\n return []",
"def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]",
"def test_collect_demands(self):\n pass",
"def strategy_expensive(cookies, cps, history, time_left, build_info):\n print\n print \"STRATEGY PART BEGIN\"\n print\n items_available = []\n for item in build_info.build_items():\n items_available.append(item)\n while items_available:\n max_cost = 0\n for item in items_available:\n #print \"item:\", item, \", cost:\", build_info.get_cost(item)\n if build_info.get_cost(item) > max_cost:\n max_cost = build_info.get_cost(item)\n most_expensive = item\n print \"most expensive:\", most_expensive\n # check if time enough\n print \"checking time\"\n print \"time left:\", time_left\n print \"cost:\", max_cost\n print \"cookies can be produced:\", cps * time_left\n if cps * time_left + cookies < max_cost:\n items_available.remove(most_expensive)\n print \"not enough,\", most_expensive, \"removed\"\n print\n else:\n print most_expensive, \"chosen\"\n print \"STRATEGY PART END\"\n print\n return most_expensive",
"def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")",
"def hits(self) :\n\t\ttry :\n\t\t\treturn self._hits\n\t\texcept Exception as e:\n\t\t\traise e"
] | [
"0.7264244",
"0.6198452",
"0.6186663",
"0.59214425",
"0.5807355",
"0.5651149",
"0.56107587",
"0.5609498",
"0.5529075",
"0.5527514",
"0.5387147",
"0.5349075",
"0.52784765",
"0.52772737",
"0.5273169",
"0.5267631",
"0.52531534",
"0.52378064",
"0.52269423",
"0.5195598",
"0.5152756",
"0.5101667",
"0.5036905",
"0.50301784",
"0.4966456",
"0.49473494",
"0.4895537",
"0.48755294",
"0.48739398",
"0.48575675"
] | 0.7800213 | 0 |
fetcher.get_unused_explores() should return all unused explores. | def test_get_unused_explores(fc: fetcher.Fetcher, test_model, test_unused_explores):
unused_explores = fc.get_unused_explores(model=test_model["name"])
assert all(e in test_unused_explores for e in unused_explores) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']",
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def summarize_unused_offers(app_queue: Optional[MarathonQueueItem]) -> Dict[str, int]:\n unused_offers = get_app_queue_last_unused_offers(app_queue)\n reasons: Dict[str, int] = defaultdict(lambda: 0)\n for offer in unused_offers:\n for reason in offer[\"reason\"]:\n reasons[reason] += 1\n return reasons",
"def unused_evals(self):\n\t\treturn self.Evals - self.nFES",
"def getExpired(self, idle=365):\n cutOff = datetime.datetime.now() - datetime.timedelta(days=idle)\n return [x for x in self.xeps if x.status == \"Experimental\" and x.date < cutOff]",
"def test_obtain_issues_exclude_false_positives(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=True, severity='High')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)",
"def get_exploits():\n results = {}\n for loader, name, ispkg in pkgutil.walk_packages(acsploit.exploits.__path__):\n m = loader.find_module(name).load_module(name)\n\n if not ispkg and hasattr(m, 'options') and hasattr(m, 'run'):\n exploit = name.replace('.', '/')\n results[exploit] = m\n\n return results",
"def get_unassigned_tags(**kwargs):\n return Tags.get_unassigned_tags(**kwargs)",
"def retrieve_closed_issues(self):\n return self._retrieve_issues(\"closed\")",
"def undefhits(self) :\n\t\ttry :\n\t\t\treturn self._undefhits\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_expenses(self, parameter=None):\n resp = zoho_http_client.get(base_url, self.details, self.headers, parameter)\n return parser.get_list(resp)",
"def test_obtain_issues_exclude_wrong_severity(self, mock_url_read):\n mock_url_read.side_effect = [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}',\n SAST_REPORT.format(false_positive=False, severity='Low')]\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)",
"def get_incomplete_exp_summaries(user_id):\n incomplete_exploration_ids = get_all_incomplete_exp_ids(user_id)\n\n number_deleted = 0\n for exploration_id in incomplete_exploration_ids:\n if not exp_services.does_exploration_exists(exploration_id):\n number_deleted = number_deleted + 1\n remove_exp_from_incomplete_list(user_id, exploration_id)\n\n return exp_services.get_exploration_summaries_matching_ids(\n incomplete_exploration_ids), number_deleted",
"def get_unhealthy_instances(self):\n unhealthy = []\n for instance in self.instances.itervalues():\n if instance.state == InstanceState.RUNNING_FAILED:\n unhealthy.append(instance)\n continue # health report from epuagent (or absence of it) is irrelevant\n\n if instance.health not in _HEALTHY_STATES:\n\n # only allow the zombie state for instances that are\n # terminated\n if (instance.state < InstanceState.TERMINATED or\n instance.health == InstanceHealthState.ZOMBIE):\n unhealthy.append(instance)\n\n return unhealthy",
"def check_for_exposed(context):\n json_data = context.response.json()\n if \"exploitable_vulnerabilities_count\" in json_data:\n raise Exception(\"Field exploitable_vulnerabilities_count Exposed in\"\n \" Free user result\")\n if \"vendor_package_link\" in json_data:\n raise Exception(\"Field vendor_package_link has been exposed for free user\")",
"def test_obtain_issues_no_query(self, mock_url_read):\n mock_url_read.side_effect = \\\n [LAST_SCAN, '{\"reportId\": 22}', '{\"status\": {\"value\": \"Created\"}}', '<CxXMLResults />']\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)",
"def _CheckExpirations(file_objs):\n expired = []\n unexpired = []\n for file_obj in file_objs:\n if _IsExpired(file_obj):\n expired.append(file_obj)\n else:\n unexpired.append(file_obj)\n return expired, unexpired",
"def get_app_queue_last_unused_offers(\n app_queue_item: Optional[MarathonQueueItem],\n) -> Sequence[Dict]:\n if app_queue_item is None:\n return []\n return app_queue_item.last_unused_offers",
"def _get_remaining(self):\n remaining = []\n for game_info in self.steam_keys:\n if game_info[1] not in self.steam_keys_given:\n remaining.append(game_info[0])\n return remaining",
"def explores(self, explores):\n\n self._explores = explores",
"def get_expired_nscache():\n now = int(time())\n keys_to_del = []\n for key, odict in nscache.iteritems():\n for dn, ce in odict.iteritems():\n if ce._expiration - now <= 0:\n keys_to_del.append((key, dn))\n return (keys_to_del, nscache)",
"def list_unresolved(self): # new\n feed = self.get_feed(limit=999999)\n posts = feed.get(\"threads\")\n\n for s in posts:\n if (\n s.get(\"approved_status\", \"approved\") != \"rejected\"\n and (\n s.get(\"type\", \"question\") != \"post\" or s.get(\"is_megathread\", True)\n )\n and not s.get(\"is_answered\", True)\n and s.get(\"unresolved_count\", 1)\n ):\n yield s",
"def forget(self, request):\n return []",
"def forget(self, request):\n return []",
"def get_low_use_instances(self):\n response = self.support.describe_trusted_advisor_check_result(checkId=LOW_USE_CHECK_ID, language='en')\n if 'result' in response:\n return response['result'].get('flaggedResources', [])",
"def missing_data_amounts():\n\n return [2]",
"def get_leverables(self):\n import re\n\n \"\"\"\n Gets all groups (leverables) from nexus\n :return: list\n \"\"\"\n if self.url == 'test':\n leverabellist = ['asu', 'bll', 'tfp']\n else:\n leverabellist = []\n try:\n response = urlopen('http://' + self.url + '/nexus/content/repositories/rpm-dev/fk/rpm/')\n except (HTTPError, URLError) as e:\n logger.error(e)\n return ['Error getting leverables!!!']\n\n for rline in response:\n line = rline.decode(\"utf-8\")\n if re.match(\".*<td>(.*)/repositories/(.*)\", line):\n leverabellist.append(line.split('\">')[-1].split('/')[0])\n\n return leverabellist",
"def get_healthy_instances(self):\n return [instance for instance in self.instances.itervalues()\n if instance.health in _HEALTHY_STATES and\n instance.state < InstanceState.RUNNING_FAILED]"
] | [
"0.6674417",
"0.5879174",
"0.573975",
"0.5573522",
"0.5512234",
"0.5511594",
"0.54789424",
"0.5453956",
"0.54249877",
"0.5305752",
"0.52119917",
"0.5211895",
"0.5186949",
"0.5154393",
"0.5097592",
"0.50868165",
"0.50696427",
"0.4995546",
"0.49941427",
"0.49911252",
"0.49783477",
"0.49753472",
"0.49482512",
"0.4939018",
"0.4932287",
"0.4932287",
"0.49266338",
"0.49254662",
"0.49111807",
"0.49102694"
] | 0.7982038 | 0 |
fetcher.get_explore_fields() should return an explore's fields. | def test_get_explore_fields_gets_fields(
fc: fetcher.Fetcher, test_model, test_explores_stats
):
test_explore = test_explores_stats[0]
explore = fc.get_explores(model=test_model["name"], explore=test_explore["name"])
assert isinstance(explore, list)
explore = explore[0]
assert isinstance(explore, models.LookmlModelExplore)
assert explore.model_name == test_model["name"]
assert explore.name == test_explore["name"]
fields = fc.get_explore_fields(explore)
assert isinstance(fields, list)
assert fields == test_explore["all_fields"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields",
"def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])",
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def fields(self):\n ...",
"def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response",
"def fieldsUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName + \\\n \"/fields\"",
"def get_query_fields(cls):\n ...",
"def fields(self):",
"def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))",
"def listFields(self):\n return self.get_json('/field')",
"def _get_fields(self):\n return self._fields",
"def request_fields(self, fields=None):\n # The cursor only works for the 'search' endpoint, just call\n # the 'field' endpoint and return all the field types\n response = self.connection.get_request(self.uri_field)\n if response.status_code != requests.codes.ok:\n logger.warning('JIRA Cloud returned %d for %s', response.status_code, self.uri_field)\n return []\n content = json.loads(response.content)\n # Overwrite some fields\n for c in content:\n if c['name'] == 'Epic Status':\n c['schema']['type'] = 'string'\n c['choices'] = (('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done'))\n elif c['name'] == 'Resolution':\n c['choices'] = self._get_resolutions()\n\n # The KEY field is never returned\n c = {\n \"id\": \"key\",\n \"key\": \"key\",\n \"name\": \"Key\",\n \"custom\": False,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"key\",\n ],\n \"schema\": {\n \"type\": \"string\",\n }\n }\n content.append(c)\n # The parent field is never returned\n c = {\n \"id\": \"parent\",\n \"key\": \"parent\",\n \"name\": \"Parent\",\n \"custom\": True,\n \"orderable\": True,\n \"navigable\": True,\n \"searchable\": True,\n \"clauseNames\": [\n \"parent\",\n ],\n \"schema\": {\n \"type\": \"any\",\n \"custom\": \"com.django-atlassian:parent\"\n }\n }\n content.append(c)\n return content",
"def get_fields(self):\r\n return self.fields",
"def fields(self):\r\n pass",
"def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}",
"def pull_fields(self, org):\n pass",
"def Fields(self):\n return self._fields",
"def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields",
"def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()",
"def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)",
"def get_fields(self):\n return self.fields",
"def get_fields(self):\n return self.fields",
"def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()",
"def test_get_featured_front_page_returns_required_fields(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n fields = ('id', 'name', 'short_name', 'info', 'n_volunteers', 'n_completed_tasks')\r\n\r\n featured = cached_apps.get_featured_front_page()[0]\r\n\r\n for field in fields:\r\n assert featured.has_key(field), \"%s not in app info\" % field",
"def _commercial_fields(self):\n return ['website']",
"def _select_fields(self):\r\n return []",
"def f(self):\r\n return self.fields()",
"def readAccessedFields(self):\n pass"
] | [
"0.7630596",
"0.71610755",
"0.619276",
"0.5935856",
"0.5886932",
"0.56719977",
"0.56685346",
"0.56128865",
"0.5601622",
"0.55978966",
"0.55809444",
"0.5578002",
"0.55722994",
"0.54728",
"0.5469623",
"0.5441008",
"0.543378",
"0.5390191",
"0.5388459",
"0.5369778",
"0.5361909",
"0.5316519",
"0.5309478",
"0.5309478",
"0.52869046",
"0.52429986",
"0.52423596",
"0.5213579",
"0.520554",
"0.5162964"
] | 0.8270363 | 0 |
fetcher.get_explore_fields() should return fields when an explore has only dimensions or only measures. | def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(
fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores
):
expected = test_dimensions_or_measures_only_explores[0]
explore = fc.get_explores(model=test_model["name"], explore=expected["name"])
assert isinstance(explore, list)
actual = explore[0]
assert actual.name == expected["name"]
assert not (actual.fields.dimensions and actual.fields.measures)
expected_fields = [f["name"] for f in expected["fields"]]
actual_fields = fc.get_explore_fields(actual)
assert actual_fields == expected_fields | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])",
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def test_get_explores(fc: fetcher.Fetcher):\n explores = fc.get_explores()\n assert isinstance(explores, list)\n assert len(explores) > 0\n assert isinstance(explores[0], models.LookmlModelExplore)",
"def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response",
"def test_get_featured_front_page_returns_required_fields(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n fields = ('id', 'name', 'short_name', 'info', 'n_volunteers', 'n_completed_tasks')\r\n\r\n featured = cached_apps.get_featured_front_page()[0]\r\n\r\n for field in fields:\r\n assert featured.has_key(field), \"%s not in app info\" % field",
"def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}",
"def test_get_explores_filters(fc: fetcher.Fetcher):\n explores = fc.get_explores(model=\"henry_dusty\")\n assert all(e.model_name == \"henry_dusty\" for e in explores)\n\n explores = fc.get_explores(model=\"henry_qa\", explore=\"explore_2_joins_all_used\")\n assert all(\n e.model_name == \"henry_qa\" and e.name == \"explore_2_joins_all_used\"\n for e in explores\n )",
"def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()",
"def get_query_fields(cls):\n ...",
"def _get_fields(self):\n return self._fields",
"def get_fields(self):\r\n return self.fields",
"def fields(self):",
"def _all_fields_all_data():\n # Takes all name fields\n all_fields = PhotoTech.objects.all().values()[0].keys()\n # For all fileds takes all fields data \n all_data = [PhotoView._all_data_fields(x) for x in all_fields]\n allowed_search_fields = ['zoom',\n 'matrix_resol',\n 'color',\n 'matrix_size',\n 'country']\n # Return dict {keys: fields}\n return {x: y for x, y in zip(all_fields, all_data)\n if x in allowed_search_fields}",
"def fields(self):\n ...",
"def _create_dimension_queries(\n self, explore: Explore, model_name: str\n ) -> List[Query]:\n queries = []\n for dimension in explore.dimensions:\n query = self.client.create_query(model_name, explore.name, [dimension.name])\n query = Query(\n query[\"id\"], lookml_ref=dimension, explore_url=query[\"share_url\"]\n )\n queries.append(query)\n return queries",
"def fields(self):\r\n pass",
"def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()",
"def get_fields(self):\n return self.fields",
"def get_fields(self):\n return self.fields",
"def listFields(self):\n return self.get_json('/field')",
"def fieldsUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName + \\\n \"/fields\"",
"def _create_explore_query(self, explore: Explore, model_name: str) -> Query:\n dimensions = [dimension.name for dimension in explore.dimensions]\n query = self.client.create_query(model_name, explore.name, dimensions)\n return Query(query[\"id\"], lookml_ref=explore, explore_url=query[\"share_url\"])",
"def getData(self):\n import labstep.entities.experimentDataField.repository as experimentDataFieldRepository\n\n return experimentDataFieldRepository.getDataFields(self)",
"def _commercial_fields(self):\n return ['website']",
"def GatherPageData(self, mr):\n # TODO(jrobbins): Allow deep-linking into this page.\n canned_query_views = []\n if mr.project_id:\n with mr.profiler.Phase('getting canned queries'):\n canned_queries = self.services.features.GetCannedQueriesByProjectID(\n mr.cnxn, mr.project_id)\n canned_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(canned_queries)]\n\n saved_query_views = []\n if mr.auth.user_id and self.services.features:\n with mr.profiler.Phase('getting saved queries'):\n saved_queries = self.services.features.GetSavedQueriesByUserID(\n mr.cnxn, mr.me_user_id)\n saved_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(saved_queries)\n if (mr.project_id in sq.executes_in_project_ids or\n not mr.project_id)]\n\n return {\n 'issue_tab_mode': 'issueAdvSearch',\n 'page_perms': self.MakePagePerms(mr, None, permissions.CREATE_ISSUE),\n 'canned_queries': canned_query_views,\n 'saved_queries': saved_query_views,\n }",
"def get_fields(self):\n \n return self.metadata.keys()",
"def get_fields(self):\n return list(self.metadata.keys())",
"def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)",
"def fields(request):\n # Only recognizes a few fields for now.\n if request.method != 'POST':\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response\n\n if not request.issue.edit_allowed:\n if not IS_DEV:\n return HttpTextResponse('Login required', status=401)\n fields = json.loads(request.POST.get('fields'))\n issue = request.issue\n if 'description' in fields:\n issue.description = fields['description']\n if 'reviewers' in fields:\n issue.reviewers = _get_emails_from_raw(fields['reviewers'])\n issue.calculate_updates_for()\n if 'subject' in fields:\n issue.subject = fields['subject']\n issue.put()\n return HttpTextResponse('')"
] | [
"0.79898906",
"0.7255912",
"0.57427955",
"0.5689888",
"0.5485005",
"0.544207",
"0.5428259",
"0.54008704",
"0.5328612",
"0.52564484",
"0.5198791",
"0.5090246",
"0.5081006",
"0.5072066",
"0.50549966",
"0.5046278",
"0.49843732",
"0.49804208",
"0.49604252",
"0.49604252",
"0.49590856",
"0.49215195",
"0.49204257",
"0.4919356",
"0.49069488",
"0.49057347",
"0.48774806",
"0.4862115",
"0.48620763",
"0.4860669"
] | 0.81051433 | 0 |
fetcher.get_explore_field_stats() should get the stats of all fields in an explore. | def test_get_explore_field_stats(
fc: fetcher.Fetcher,
looker_sdk: methods.Looker40SDK,
test_model,
test_used_explore_names,
test_explores_stats,
):
explore = fc.get_explores(
model=test_model["name"], explore=test_used_explore_names[0]
)[0]
actual_stats = fc.get_explore_field_stats(explore)
assert isinstance(actual_stats, dict)
for e in test_explores_stats:
if e["name"] == test_used_explore_names[0]:
expected_stats = e
assert all(actual_stats[k] == 0 for k in expected_stats["unused_fields"])
assert all(actual_stats[k] > 0 for k in expected_stats["used_fields"]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def test_get_explore_fields_gets_fields_for_dimension_or_measure_only_explores(\n fc: fetcher.Fetcher, test_model, test_dimensions_or_measures_only_explores\n):\n expected = test_dimensions_or_measures_only_explores[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=expected[\"name\"])\n assert isinstance(explore, list)\n actual = explore[0]\n assert actual.name == expected[\"name\"]\n assert not (actual.fields.dimensions and actual.fields.measures)\n expected_fields = [f[\"name\"] for f in expected[\"fields\"]]\n actual_fields = fc.get_explore_fields(actual)\n assert actual_fields == expected_fields",
"def listFields(self):\n return self.get_json('/field')",
"def _field_extract(url):\n logging.info('extracting player stats from url: {}'.format(url))\n player_summary = requests.get(url)\n parser = BeautifulSoup(player_summary.content, 'html.parser')\n player_profile = parser.select('tr')\n list_of_fields = ['Innings', 'Not Outs', 'Aggregate', 'Highest Score', '50s', '100s', 'Ducks', '4s',\n '6s', 'Scoring Rate', 'Overs', 'Runs Conceded', 'Wickets', 'Average', '4 Wickets in Innings',\n '5 Wickets in Innings', 'Best', 'Economy Rate', 'Strike Rate', 'Catches',\n 'Most Catches in Innings', 'Stumpings', 'Most Catches in Innings',\n 'Most Dismissals in Innings',\n 'Won/Lost', 'Matches/Won/Lost', 'Tosses Won', 'Runs Scored', 'Batting Average']\n mapped_fields = {} # holds series level stats\n stats_header = '' # holds series stats metric header\n for each_field in range(0, len(player_profile)):\n # get stats header\n try:\n stats = player_profile[each_field].select_one('.ProfileSection').text.strip()\n if stats in ['Batting', 'Fielding', 'Bowling', 'Wicket Keeping', 'Captaincy']:\n stats_header = stats\n except Exception as e:\n str(e) # just ignore the exception\n # update stats data\n try:\n field = player_profile[each_field].select_one('.FieldName').text.split(':')[0]\n value = player_profile[each_field].select_one('.FieldValue').text.strip()\n if field in list_of_fields:\n mapped_fields['{}_{}'.format(stats_header.lower(), field.replace(' ', '_').lower())] = value\n except AttributeError as ae:\n logging.info('skip: May be html tree doesn\\'t find search - {}'.format(ae))\n logging.info('extract completed for url: {} ..... /200'.format(url))\n return mapped_fields",
"def field():\n data = request.get_json()\n return jsonify(result=Statistics.field_type_count(data['field']))",
"def fields(self):\n ...",
"def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()",
"def fields(self):",
"def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=\"explore_2_joins_1_used\"\n )[0]\n field_stats = {\n \"explore_2_joins_1_used.d1\": 10,\n \"explore_2_joins_1_used.d2\": 5,\n \"explore_2_joins_1_used.d3\": 0,\n \"explore_2_joins_1_used.m1\": 0,\n \"join1.d1\": 10,\n \"join1.d2\": 10,\n \"join1.d3\": 10,\n \"join1.m1\": 0,\n \"join2.d1\": 0,\n \"join2.d2\": 0,\n \"join2.d3\": 0,\n \"join2.m1\": 0,\n }\n join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)\n assert isinstance(join_stats, dict)\n assert len(join_stats) == 2\n assert join_stats == {\"join1\": 30, \"join2\": 0}",
"def get_fields(self, pager=None):\n return Field.deserialize_list(self._get_multiple('fields', {}, pager))",
"def readAccessedFields(self):\n pass",
"def _get_fields(self):\n return self._fields",
"def _field_stat(self, field):\r\n if not field in self.stats:\r\n stat = dq.FieldStatistics(field, distinct_threshold = self.distinct_threshold)\r\n self.stats[field] = stat\r\n else:\r\n stat = self.stats[field]\r\n return stat",
"def field(field_id):\n if not request.is_xhr:\n abort(403)\n\n if field_id == 0:\n field_id = session.get('current_field_id', 2)\n\n session['current_field_id'] = field_id\n state = {\n 'status': 0,\n 'field_size': 0,\n 'fox_count': 0,\n 'foxes': [],\n 'start_time': 0,\n 'end_time': 0,\n 'shot_count': 0,\n 'last_shot_result': '',\n 'hits': 0,\n 'is_in_top_10': False,\n }\n field = Field.query.get(field_id)\n state['field_size'] = field.size\n state['fox_count'] = field.fox_count\n\n installed_foxes = 0\n foxes = []\n random.seed()\n while installed_foxes < field.fox_count:\n x = random.randrange(field.size)\n y = random.randrange(field.size)\n fox = {\n 'x': x,\n 'y': y,\n }\n if fox in foxes:\n continue\n foxes.append(fox)\n installed_foxes += 1\n\n state['foxes'] = foxes\n session['state'] = state\n\n result = state.copy()\n del result['foxes'] # We don't want to spoil foxes' positions\n\n return jsonify(result)",
"def field_names(self):\n ...",
"def _get_all_field_functions(self):\n get_url = 'v1/fieldFunctions'\n self.field_functions = {f['mdmId']: f for f in self.carol.call_api(get_url, params=dict(pageSize=-1))['hits']}\n self.field_functions_id = {f['mdmName']: f['mdmId'] for f in self.field_functions.values()}",
"def fields(request):\n fields = request.GET.getlist('field')\n response = {}\n if 'reviewers' in fields:\n response['reviewers'] = request.issue.reviewers or []\n if 'description' in fields:\n response['description'] = request.issue.description\n if 'subject' in fields:\n response['subject'] = request.issue.subject\n return response",
"def get_query_fields(cls):\n ...",
"def Fields(self):\n return self._fields",
"def fields(self):\r\n pass",
"def fields(self):\r\n return self._by_name.iteritems()",
"def get_field_values(self, index, field, **kwargs):\n search = self._build_search(index, **kwargs)\n search.aggs.bucket('fieldCounts', 'terms', field=field, size=10000)\n fieldValues = {}\n for bucket in search.execute().aggregations.fieldCounts.buckets:\n fieldValues[bucket.key] = bucket.doc_count\n return fieldValues",
"def get_fields(self):\r\n return self.fields",
"def read_fields(self, limit = 0, collapse = False):\r\n\r\n keys = []\r\n probes = {}\r\n\r\n def probe_record(record, parent = None):\r\n for key, value in record.items():\r\n full_key = parent + \".\" + key if parent else key\r\n\r\n if self.expand and type(value) == dict:\r\n probe_record(value, full_key)\r\n continue\r\n\r\n if not full_key in probes:\r\n probe = brewery.dq.FieldTypeProbe(full_key)\r\n probes[full_key] = probe\r\n keys.append(full_key)\r\n else:\r\n probe = probes[full_key]\r\n probe.probe(value)\r\n\r\n count = 0\r\n for record in self.records():\r\n if collapse:\r\n record = collapse_record(record)\r\n\r\n probe_record(record)\r\n if limit and count >= limit:\r\n break\r\n count += 1\r\n\r\n fields = []\r\n\r\n for key in keys:\r\n probe = probes[key]\r\n field = Field(probe.field)\r\n\r\n storage_type = probe.unique_storage_type\r\n if not storage_type:\r\n field.storage_type = \"unknown\"\r\n elif storage_type == \"unicode\":\r\n field.storage_type = \"string\"\r\n else:\r\n field.storage_type = \"unknown\"\r\n field.concrete_storage_type = storage_type\r\n\r\n # FIXME: Set analytical type\r\n\r\n fields.append(field)\r\n\r\n self.fields = list(fields)\r\n return self.fields",
"def get_fields(self, key=None):\n return self._get_query('fields', self._build_params(key=key), Field)",
"def EvaluateFields(self, *float, **kwargs):\n ...",
"def fields(self, _only_called=False, **kwargs):\n\n # Check for an operator and transform to mongo-style if there is one\n operators = [\"slice\"]\n cleaned_fields = []\n for key, value in kwargs.items():\n parts = key.split(\"__\")\n if parts[0] in operators:\n op = parts.pop(0)\n value = {\"$\" + op: value}\n\n key = \".\".join(parts)\n try:\n field_name, value = self._check_valid_field_name_to_project(key, value)\n except ValueError as e:\n raise e\n\n cleaned_fields.append((field_name, value))\n\n # divide fields on groups by their values\n # (ONLY group, EXCLUDE group etc.) and add them to _loaded_fields\n # as an appropriate QueryFieldList\n fields = sorted(cleaned_fields, key=operator.itemgetter(1))\n for value, group in itertools.groupby(fields, lambda x: x[1]):\n fields = [field for field, value in group]\n self._loaded_fields += QueryFieldList(fields, value=value, _only_called=_only_called)\n\n return self",
"def getData(self):\n import labstep.entities.experimentDataField.repository as experimentDataFieldRepository\n\n return experimentDataFieldRepository.getDataFields(self)",
"def describe_index_fields(DomainName=None, FieldNames=None, Deployed=None):\n pass",
"def pull_fields(self, org):\n pass"
] | [
"0.757388",
"0.66561705",
"0.5693254",
"0.5563461",
"0.5562313",
"0.55102324",
"0.55069894",
"0.54181457",
"0.5415948",
"0.53354806",
"0.53127414",
"0.528702",
"0.5241747",
"0.52340806",
"0.52330387",
"0.52114326",
"0.51936793",
"0.5185652",
"0.51400167",
"0.5138318",
"0.5132618",
"0.5127807",
"0.51146215",
"0.51070225",
"0.5106135",
"0.5088951",
"0.50864375",
"0.5081033",
"0.50448966",
"0.50198954"
] | 0.7587318 | 0 |
fetcher.get_explore_join_stats() should return the stats of all joins in an explore. | def test_get_explore_join_stats(fc: fetcher.Fetcher, test_model):
explore = fc.get_explores(
model=test_model["name"], explore="explore_2_joins_1_used"
)[0]
field_stats = {
"explore_2_joins_1_used.d1": 10,
"explore_2_joins_1_used.d2": 5,
"explore_2_joins_1_used.d3": 0,
"explore_2_joins_1_used.m1": 0,
"join1.d1": 10,
"join1.d2": 10,
"join1.d3": 10,
"join1.m1": 0,
"join2.d1": 0,
"join2.d2": 0,
"join2.d3": 0,
"join2.m1": 0,
}
join_stats = fc.get_explore_join_stats(explore=explore, field_stats=field_stats)
assert isinstance(join_stats, dict)
assert len(join_stats) == 2
assert join_stats == {"join1": 30, "join2": 0} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_join_info(node):\n operator_info = node['operatorInfo']\n analyze_info = node['AnalyzeInfo']\n\n if 'Join' in node['id']:\n # Join Node\n join_type = extract_join_type(operator_info)\n conditions = extract_join_conditions(operator_info)\n current_node = JoinPlan(join_type, conditions)\n assert 'children' in node and len(node['children']) == 2\n childrens = node['children']\n current_node.left_node = extract_join_info(childrens[0])\n current_node.right_node = extract_join_info(childrens[1])\n current_node.execute_time = analyze_info[\"time\"]\n current_node.est_rows = node[\"estRows\"]\n else:\n # Table Reader\n # assert 'TableReader' in node['id']\n # extract selection if need\n current_node = extract_table_reader(node)\n current_node.est_rows = node['estRows']\n return current_node",
"def join_stats(join_on, mg_stats):\n new_stats = {}\n\n def add_metagenome_stats(new_mgs, old_mgs):\n return MetagenomeStats(*[getattr(new_mgs, f) + getattr(old_mgs, f) for f in old_mgs._fields])\n\n for mgs in mg_stats:\n # find maximally matching join criterion\n max_match = 0\n group = ''\n for grp in join_on:\n if grp in mgs[0] and len(grp) > max_match:\n max_match = len(grp)\n group = grp\n\n new_stats[group] = mgs[1] if group not in new_stats else add_metagenome_stats(mgs[1], new_stats[group])\n return new_stats",
"def num_joins(self):\n return self._num_joins",
"def test_get_explore_field_stats(\n fc: fetcher.Fetcher,\n looker_sdk: methods.Looker40SDK,\n test_model,\n test_used_explore_names,\n test_explores_stats,\n):\n explore = fc.get_explores(\n model=test_model[\"name\"], explore=test_used_explore_names[0]\n )[0]\n actual_stats = fc.get_explore_field_stats(explore)\n assert isinstance(actual_stats, dict)\n\n for e in test_explores_stats:\n if e[\"name\"] == test_used_explore_names[0]:\n expected_stats = e\n\n assert all(actual_stats[k] == 0 for k in expected_stats[\"unused_fields\"])\n assert all(actual_stats[k] > 0 for k in expected_stats[\"used_fields\"])",
"def joins(self):\n return self._joins",
"def num_joins(self):\n ret_val = self._num_joins()\n return ret_val",
"def get(self):\n join = request.args.get('join')\n limit = request.args.get('limit')\n\n current_user = User.find_by_username(get_jwt_identity())\n\n if join is None:\n room_list = Room.query.all()\n else:\n if join == 'true':\n room_list = current_user.joined_room\n else:\n room_list = Room.query.filter(~Room.members.any(id=current_user.id)) \\\n .limit(limit if limit is not None else 15).all()\n result = rooms_schema.dump(room_list)\n return {\"status\": \"success\", \"data\": result}, 200",
"def test_get_used_explores(fc: fetcher.Fetcher, test_model, test_used_explore_names):\n used_explores = fc.get_used_explores(model=test_model[\"name\"])\n assert isinstance(used_explores, dict)\n assert all(e in test_used_explore_names for e in used_explores)",
"def join(self):\n super().join()\n return self.grad, self.traj_infos, self.opt_info",
"async def on_member_join(self, member: Member) -> None:\n if member.guild.id != Guild.id:\n return\n\n self.bot.stats.gauge(\"guild.total_members\", len(member.guild.members))",
"def test_get_explore_fields_gets_fields(\n fc: fetcher.Fetcher, test_model, test_explores_stats\n):\n test_explore = test_explores_stats[0]\n explore = fc.get_explores(model=test_model[\"name\"], explore=test_explore[\"name\"])\n assert isinstance(explore, list)\n explore = explore[0]\n assert isinstance(explore, models.LookmlModelExplore)\n assert explore.model_name == test_model[\"name\"]\n assert explore.name == test_explore[\"name\"]\n fields = fc.get_explore_fields(explore)\n assert isinstance(fields, list)\n assert fields == test_explore[\"all_fields\"]",
"def get_statistics(self):\n statistics = {\n 'entry': 0,\n 'bandwidth': 0,\n 'exit': 0,\n 'pages': 0\n }\n downloads = statistics.copy()\n \n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state'\n )\n context_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_context_state'\n )\n site = portal_state.portal()\n \n url = self.context.absolute_url().replace(site.absolute_url(), '')\n urls = []\n if url == '':\n url = '/'\n quoted_url = urllib.quote(url)\n \n urls.append(quoted_url)\n urls.append(quoted_url + '/view')\n canonical_url = urllib.quote(context_state.canonical_object_url())\n if canonical_url not in urls:\n urls.append(canonical_url)\n urls.append(canonical_url + '/view')\n\n query = 'SELECT * FROM statistics WHERE url IN %s' % str(tuple(urls))\n results = Session.execute(query).fetchall()\n if results:\n for row in results:\n for key in statistics.keys():\n statistics[key] = statistics[key] + int(row[key])\n\n results_dw = Session.execute(\n 'SELECT * FROM statistics WHERE url=\"%s/at_download%%\"' % quoted_url).fetchall()\n if results_dw:\n for row in rows_stat:\n for key in statistics.keys():\n downloads[key] = downloads[key] + int(row[key])\n statistics['downloads'] = downloads['pages']\n return statistics",
"def num_joins(self, num_joins):\n self._num_joins = num_joins",
"async def on_member_join(self, member: discord.Member) -> None:\n\n await add_user_in_db(member, member.guild)\n\n guild_from_db = await Guilds.get(guild_id=member.guild.id)\n role_saver = guild_from_db.role_saver\n if role_saver:\n user_roles = await UserRoles.get_many(guild_id=member.guild.id, user_id=member.id)\n if user_roles:\n for rol in user_roles:\n role = discord.utils.get(member.guild.roles, id=rol.role_id)\n if role.name == '@everyone':\n continue\n else:\n await member.add_roles(role)\n\n await Profiles.update(user_id=member.id,\n guild_id=member.guild.id,\n set=[\"joins = joins + 1\"])\n await Guilds.update(guild_id=member.guild.id,\n set=[\"day_joins = day_joins + 1\"])\n\n await self.refresh_user_count_channel(member.guild)",
"async def on_guild_join(self, guild):\n l.info(f\"Joined {guild.name} with {guild.member_count} users!\")",
"def community_stats(request):\n stats = cache.get(STATS_CACHE_KEY, None)\n if not stats:\n\n stats = fetch(PEOPLE_STATS_URL)\n packages_data = fetch(PACKAGES_STATS_URL)\n if 'meta' in packages_data:\n stats.update({'packages': packages_data['meta']['total_count']})\n\n stats = {'community_stats': stats}\n\n cache.add(STATS_CACHE_KEY, stats, 60 * 60 * 12) # for half a day\n\n return stats",
"def joincount(pntGraph, lineGraph, criterion='', threshold=0):\n matNumDict, _ = spatialjoin._spatialjoin(pntGraph, lineGraph, criterion, threshold)\n for edge in lineGraph.edges(data=True):\n edge[2]['joinCount'] = matNumDict[edge[2]['Ind']]\n print('The join count is added to the POLYLINE type graph.')",
"def ingest_joined_data(self, joined_data_buffer, ratio=0.8):\n # local join to simulate a joining workflow\n\n # update next_join_job_id and joining state\n next_join_job_id = JoinManager.name_next_join_job(experiment_id=self.experiment_id)\n self.exp_db_client.update_experiment_next_join_job_id(self.experiment_id, next_join_job_id)\n self.exp_db_client.update_experiment_joining_state(self.experiment_id, JoiningState.PENDING)\n\n self.next_join_job = JoinManager(\n join_db_client=self.join_db_client,\n experiment_id=self.experiment_id,\n join_job_id=next_join_job_id,\n input_obs_data_s3_path=\"local-join-does-not-apply\",\n input_reward_data_s3_path=\"local-join-does-not-apply\",\n boto_session=self.boto_session,\n )\n\n logger.info(\"Started dummy local joining job...\")\n self.next_join_job.start_dummy_join(joined_data_buffer=joined_data_buffer, ratio=ratio)\n\n # this method can be invoked either in local/SM mode\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries = 0\n\n while not succeeded_state:\n # Sync experiment state if required\n self._sync_experiment_state_with_ddb()\n logger.debug(\"Waiting for experiment table joining status to be updated...\")\n time.sleep(2 * (2 ** num_retries))\n succeeded_state = (\n self.experiment_record._joining_state == JoiningState.SUCCEEDED\n and self.experiment_record._last_joined_job_id == next_join_job_id\n and self.experiment_record._next_join_job_id is None\n )\n num_retries += 1\n if num_retries >= 5:\n raise UnhandledWorkflowException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"was in state of '{self.experiment_record._joining_state}'. Failed to sync table states.\"\n )\n if (\n self.experiment_record._joining_state == JoiningState.FAILED\n or self.experiment_record._joining_state == JoiningState.CANCELLED\n ):\n raise WorkflowJoiningJobException(\n f\"Joining job '{self.experiment_record._next_join_job_id}' \"\n f\"ended with state '{self.experiment_record._joining_state}'. Please check if provided \"\n \"joined_data_buffer was in correct data format.\"\n )",
"def search_all_join_on(sql_dict, table_json, args, join_on_label=None):\n all_from = []\n global globe_join_on_label_count\n globe_join_on_label_count = 0\n\n sql_dict['where'] = intersect_check(sql_dict['where'])\n groupby_list = []\n groupby_top = \"\"\n re_sql = \"select distinct \" if sql_dict['select'][0] else \"select \"\n orderby_sql, table_list, agg_in_order = (\"\", [], False)\n # if args.orderby_to_subquery and is_orderby_for_subquery(sql_dict): # v1.1\n # orderby_sql,table_list,agg_in_order = (\"\",[],False)\n # else:\n # orderby_sql,table_list,agg_in_order = create_order_by(sql_dict['orderBy'],sql_dict['limit'])\n\n # Get table info from select column\n for column in sql_dict['select'][1]:\n table = column[1][1][1].split('.')[0].lower()\n if not table in table_list:\n table_list.append(table)\n select_unit = select_unit_back(column)\n if not (column[0] or column[1][1][0]):\n groupby_list.append(select_unit)\n re_sql += select_unit + ' , '\n re_sql = re_sql[:-3]\n top_select_table_list = copy.deepcopy(table_list)\n # Add table info to select column\n break_idx, table_list, next_sql, sql_where, sql_having, orderby_sql_, next_table_list = get_where_column(sql_dict,\n table_list,\n 0, SQL_TOP,\n table_json,\n args)\n if break_idx < 0 or next_sql == SQL_TOP:\n orderby_sql, table_list_order, agg_in_order = create_order_by(sql_dict['orderBy'], sql_dict['limit'])\n for order_t in table_list_order:\n if order_t.lower() not in table_list:\n table_list.append(order_t.lower())\n\n if sql_dict['groupBy']: # V1.1:\n groupby_top = \" group by \" + col_unit_back(sql_dict['groupBy'][0])\n elif (len(groupby_list) != len(sql_dict['select'][1]) and groupby_list) or sql_having.strip() != '' or (\n agg_in_order and groupby_list) or orderby_sql_.strip():\n if args.group_for_exact_match and len(groupby_list) > 1:\n groupby_list = infer_group_for_exact_match(groupby_list, table_json)\n groupby_top = \" group by \" + \",\".join(groupby_list)\n\n orderby_sql += orderby_sql_\n from_table_net, table_fk_list = get_table_network(table_json, table_list, join_on_label)\n\n from_table_netss, _ = get_table_network(table_json, table_list, join_on_label, False)\n all_from.append(from_table_netss)\n\n top_sql_list = [re_sql]\n re_sql += create_from_table(from_table_net, table_json['table_names_original'],\n table_json['table_column_names_original'], table_fk_list)\n top_sql_list.append(re_sql + sql_where + groupby_top + sql_having)\n\n if sql_dict['where']:\n while next_sql:\n table_list = next_table_list # []#V1.2\n if next_sql == SQL_TOP:\n sub_sql = \" \" + sql_dict['where'][break_idx][:-1] + \" \" + top_sql_list[0]\n table_list = top_select_table_list\n start_new_top_sql = True\n else:\n select_column = col_unit_back(sql_dict['where'][break_idx][3])\n sub_sql = \"select \" + select_column\n if sql_dict['where'][break_idx][3][1].split('.')[0].lower() not in table_list:\n table_list.append(sql_dict['where'][break_idx][3][1].split('.')[0].lower())\n start_new_top_sql = False\n\n break_idx, table_list, next_sql, sql_where, sql_having, orderby_sql_, next_table_list = get_where_column(\n sql_dict, table_list, break_idx + 1, next_sql, table_json, args)\n if args.orderby_to_subquery and not orderby_sql_:\n orderby_sql_, table_list = orderby_to_subquery(sql_dict, table_list) # v1.1\n\n # if not start_new_top_sql:\n from_table_net, table_fk_list = get_table_network(table_json, table_list, join_on_label)\n from_table_netss, _ = get_table_network(table_json, table_list, join_on_label, False)\n 
all_from.append(from_table_netss)\n sub_sql += create_from_table(from_table_net, table_json['table_names_original'],\n table_json['table_column_names_original'], table_fk_list)\n\n # if sql_where.strip() != 'where':\n sub_sql += sql_where\n\n if not start_new_top_sql:\n # if (sql_having.strip() and select_column) or (orderby_sql_.strip() and select_column):#v1.0\n if (sql_having.strip() and select_column) or ((\n \"max(\" in orderby_sql_ or \"min(\" in orderby_sql_ or \"count(\" in orderby_sql_ or \"sum(\" in orderby_sql_ or \"avg(\" in orderby_sql_) and select_column): # v1.0\n sub_sql += \" group by \" + select_column\n else:\n if groupby_top.strip():\n sub_sql += groupby_top\n elif (sql_having.strip() != '' and groupby_list) or (orderby_sql_.strip() and groupby_list):\n sub_sql += \" group by \" + \",\".join(groupby_list)\n\n sub_sql += sql_having + orderby_sql_\n\n if start_new_top_sql:\n top_sql_list.append(sub_sql)\n else:\n top_sql_list[len(top_sql_list) - 1] = top_sql_list[len(top_sql_list) - 1].replace('@@@', sub_sql, 1)\n\n re_sql = \"\"\n for idx, sql in enumerate(top_sql_list):\n if idx > 0:\n re_sql += sql\n\n re_sql += orderby_sql\n\n return re_sql, all_from, sql_dict",
"def on_join_data(self, data):\n self.users.client.key = data[7] # unique user identifier ?\n self.users.client.join_time = data[11] # join time as unix including milliseconds ?\n self._room_id = data[13] # room id\n\n self.send_connection_ok()\n\n if config.DEBUG_TO_CONSOLE:\n print ('Join Data:')\n for i, v in enumerate(data):\n print ('\\t[%s] - %s' % (i, v))",
"def test_combine_peer_stats(self):\n tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, cbsa=request.GET.get('metro'))\n metro = Geo.objects.get(geo_type=Geo.METRO_TYPE, geoid=request.GET.get('metro'))\n lender = Institution.objects.get(institution_id=request.GET.get('lender'))\n peers = lender.get_peer_list(metro, None, None)\n peer_data_collector = []\n for peer in peers:\n peer_request = HttpRequest()\n peer_request.GET['lender'] = peer.institution.institution_id\n peer_request.GET['metro']= metro.geoid\n peer_lar_data = loan_originations_as_json(peer_request)\n peer_data_collector.append(assemble_stats(peer_lar_data, tracts))\n peer_stats = combine_peer_stats(peer_data_collector)\n self.assertEqual(peer_stats['hma_pct'], 0.0)\n self.assertEqual(peer_stats['lma_pct'], 1.0)\n self.assertEqual(peer_stats['mma_pct'], 0.0)\n self.assertEqual(peer_stats['lma'], 7)\n self.assertEqual(peer_stats['mma'], 0)\n self.assertEqual(peer_stats['hma'], 0)\n self.assertEqual(peer_stats['lar_total'], 7)",
"def raw_joins(self):\n return self.obj_payload[\"joins\"]",
"def fetch_metrics(self):\n\n self.explain_all_indices()",
"def get_joins(self, p, vv):\n self._get_joins(p, vv)",
"def get_workspace_share_details():\n\n # connect to mysql\n db_connection = mysql.connect(\n host=sql_host, user=\"metrics\", passwd=metrics_mysql_password, database=\"metrics\"\n )\n\n cursor = db_connection.cursor()\n query = \"use \" + query_on\n cursor.execute(query)\n\n workspaces_dict = get_workspaces(db_connection)\n kb_staff = get_kbase_staff(db_connection)\n (workspaces_dict, max_shared_count) = get_workspace_shares(\n workspaces_dict, kb_staff\n )\n\n ################\n # Print the header line:\n ################\n header_line = (\n \"Narrative ID\\tOwner\\tCreation Date\\tLast Modified\\tis_deleted\\tis_public\"\n )\n for i in range(max_shared_count):\n header_line += \"\\tShared_person_{}\\tShare_Type_{}\\tis_KB_Staff_{}\".format(\n str(i + 1), str(i + 1), str(i + 1)\n )\n print(header_line)\n\n ###############\n # Print the WS rows\n ###############\n for ws_id in workspaces_dict:\n print(\n \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(\n str(ws_id),\n workspaces_dict[ws_id][\"username\"],\n workspaces_dict[ws_id][\"creation_date\"],\n workspaces_dict[ws_id][\"mod_date\"],\n str(workspaces_dict[ws_id][\"is_deleted\"]),\n str(workspaces_dict[ws_id][\"is_public\"]),\n \"\\t\".join(workspaces_dict[ws_id][\"shares_list\"]),\n )\n )",
"def api_contests_join():\n if request.method == 'GET':\n user = get_queryparam('user')\n returnJSON = models.select_joined_contest(\n params=('*'),\n conditions=('{}=\\\"{}\\\"'.format(\n settings.DB_COLUMNS.JOINED_CONTEST_USER,\n user\n )\n )\n )\n return jsonify(returnJSON)\n elif request.method == 'POST':\n postJSON = request.get_json()\n models.insert_joined_contest(\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_USER],\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_CONTEST]\n )\n return ('', 204)\n elif request.method == 'DELETE':\n postJSON = request.get_json()\n models.delete_joined_contest(\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_USER],\n postJSON[settings.DB_COLUMNS.JOINED_CONTEST_CONTEST]\n )\n return ('', 204)",
"def get_joins(self):\n\t\tprint('connecting to Sql Server:\\n\\tserver: {server}\\n\\tdatabase: {database}'.format(**self.__dict__))\n\t\t# Child: <schema>.<table>\n\t\t# Parent: <schema>.<table>\n\t\t# Column: <column>\n\t\tJoin = namedtuple('Join', 'Child,Column,Datatype,Parent')\n\t\twith pymssql.connect(self.server, self.user, self.password, self.database) as conn:\n\t\t\t# read sql source file\n\t\t\tprint('reading query_file: {}'.format(self.query_file))\n\t\t\twith open(self.query_file) as sqlfile:\n\t\t\t\tquery = sqlfile.read()\n\t\t\t\t# connect to SQL Server\n\t\t\t\t# init query\n\t\t\t\tcur = conn.cursor()\n\t\t\t\tprint('executing query')\n\t\t\t\tcur.execute(query)\n\t\t\t\t# load query results into list of namedtuple python data structure\n\t\t\t\tself.joins = [j for j in map(Join._make, cur)]\n\t\t\t\t# leave file, sql connection contexts",
"def join_ids(self) -> List[int]:\n return self._join_ids",
"def stats():\n return jsonify(shorten.get_stats(get_db(), app.config['MINI_URL_BASE']))",
"def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours"
] | [
"0.5480241",
"0.53656536",
"0.50666654",
"0.5061134",
"0.49818888",
"0.4904676",
"0.48338452",
"0.48054832",
"0.47597033",
"0.47334749",
"0.4718956",
"0.47070545",
"0.46772164",
"0.46477938",
"0.46049055",
"0.45803955",
"0.45192826",
"0.44968593",
"0.44656146",
"0.44228086",
"0.43793604",
"0.43723446",
"0.43236396",
"0.43163905",
"0.4316303",
"0.43152854",
"0.43032235",
"0.42919376",
"0.42771378",
"0.42507434"
] | 0.83031076 | 0 |
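The room-listing endpoint shown above excludes rooms the current user already belongs to via SQLAlchemy's ~Room.members.any(...) anti-join. A minimal, self-contained sketch of that pattern, assuming a hypothetical Room/User many-to-many schema (model and table names here are illustrative, not taken from the dataset):

    from sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Table
    from sqlalchemy.orm import declarative_base, relationship, Session

    Base = declarative_base()

    # Hypothetical association table for the many-to-many membership relation
    membership = Table(
        "membership", Base.metadata,
        Column("user_id", ForeignKey("users.id"), primary_key=True),
        Column("room_id", ForeignKey("rooms.id"), primary_key=True),
    )

    class User(Base):
        __tablename__ = "users"
        id = Column(Integer, primary_key=True)
        name = Column(String)

    class Room(Base):
        __tablename__ = "rooms"
        id = Column(Integer, primary_key=True)
        name = Column(String)
        members = relationship("User", secondary=membership, backref="joined_rooms")

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        current_user = User(name="alice")
        session.add_all([current_user, Room(name="general"), Room(name="random")])
        session.commit()
        # Rooms the user has not joined yet, capped at 15 like the endpoint above
        not_joined = (
            session.query(Room)
            .filter(~Room.members.any(id=current_user.id))
            .limit(15)
            .all()
        )
        print([room.name for room in not_joined])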
Normalize an audio file to the range [-1, 1] | def normalize(audio):
norm = audio/max(audio)
return norm | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==1:\n diff=0-max(data_dB)\n if ch==2:\n d1=0-max(data_dB[:,0])\n d2=0-max(data_dB[:,1])\n diff=max(d1,d2)\n print('Adding '+str(diff)+' dB...')\n data_dB_norm=data_dB+diff\n data_norm=10.0**((data_dB_norm)/20.0)\n #sign the bits appropriately:\n for k in range (ch):\n for i in range (n):\n if data[i,k]<0.0:\n data_norm[i,k]=-1.0*data_norm[i,k]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_normalized.wav',data_norm,sr,'PCM_16')\n print('Done!')\n return data_norm",
"def normalize(wav, flux):\n return flux / flux.max() # maximum flux = 1\n\n # flux_norm = flux[wav>wav_norm][0]\n # return flux / flux_norm",
"def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")",
"def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume",
"def normalize(volume):\n\n MIN_BOUND = 0\n MAX_BOUND = 256.0\n volume = (volume - MIN_BOUND) /(MAX_BOUND - MIN_BOUND)\n volume[volume > 1] = 1 #Clip everything larger than 1 and 0\n volume[volume < 0] = 0\n volume = (volume*255).astype('uint8')\n\n return volume",
"def normalize(volume):\n max = np.amax(volume)\n if max == 0:#Fixes dividing by 0 error if nothing in the volume\n return volume.astype(np.uint8)\n\n normalized = volume * (255.0 / max)\n normalized = np.round(normalized).astype(np.uint8)\n return normalized",
"def normalize_sample(sample_data):\n BASE = 255\n sample_data = np.array(sample_data, dtype='float32')\n return sample_data/BASE",
"def normalize(self,arr):\n arr = arr/(arr.max()/255.0)\n return arr",
"def normalize(image):\n return image / 127.5 - 1.",
"def normalize(image):\r\n return image / 127.5 - 1.",
"def normalize_frames(frames):\n new_frames = frames.astype(np.float32)\n new_frames /= (255 / 2)\n new_frames -= 1\n\n return new_frames",
"def normalize_signal(signal):\n gain = 1.0 / (np.max(np.abs(signal)) + 1e-9)\n return signal * gain",
"def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)",
"def normalize_01(x):\n return x / 255.0",
"def normalise(image):",
"def normalize(img):\n img = np.clip(img, 0, 255).astype(np.uint8)\n return img / 255",
"def normalize(sample, maxval):\n sample = (2 * (sample.astype(np.float32) / maxval) - 1.) * 1024\n #sample = sample / np.std(sample)\n return sample",
"def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')",
"def normalize(array):\n\treturn array/np.max(array)",
"def normalize(x):\n # TODO: Implement Function\n \n return x/255",
"def normalize_volumes_mixmode(directory, amplitude=0.08, ext='.wav'):\n subdirectories = [x[0] for x in os.walk(directory)]\n for subdirectory in subdirectories:\n os.system(f\"normalize-audio -w 16 -a {amplitude} -b '{subdirectory}/'*{ext}\")",
"def normalize(img):\n img = img.astype(np.float32)\n img -= img.min()\n img /= img.max()\n img *= 255\n img = img.astype(np.uint8)\n\n return img",
"def normalize(self, ref=1):\n maximum = max(abs(self.intensities))\n return Spectrum(self.wavelengths, ref * self.intensities/maximum)",
"def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))",
"def normalize_audio_feature(audio_feature, per_feature=False):\n axis = 0 if per_feature else None\n mean = np.mean(audio_feature, axis=axis)\n std_dev = np.std(audio_feature, axis=axis) + 1e-9\n normalized = (audio_feature - mean) / std_dev\n return normalized",
"def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr",
"def normalize(av, vmin=0., vmax=1.):\n if vmin == vmax:\n return np.ones_like(av)*vmin\n elif vmax < vmin:\n warnings.warn(\"swapping vmin and vmax, because vmax < vmin.\")\n vmin, vmax = vmax, vmin\n\n norm_one = (av - np.min(av))/(np.max(av)-np.min(av))\n return norm_one * (vmax-vmin) + vmin",
"def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1",
"def apply_fourier_transform(chunked_audio):\n pass",
"def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x"
] | [
"0.6990606",
"0.69585127",
"0.6922056",
"0.6835355",
"0.6835355",
"0.66305023",
"0.64685136",
"0.64316094",
"0.6400724",
"0.6398994",
"0.63599336",
"0.6334538",
"0.63280183",
"0.62815994",
"0.62611055",
"0.62473464",
"0.6237534",
"0.6221386",
"0.6176301",
"0.6140515",
"0.6132791",
"0.6123954",
"0.60941035",
"0.6029671",
"0.6020562",
"0.6014642",
"0.59951377",
"0.597458",
"0.59674263",
"0.59554684"
] | 0.78221744 | 0 |
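The normalize document above divides by the positive maximum of the signal, so a sample whose negative peak is larger than the positive one can still land outside [-1, 1]. A hedged sketch of peak normalization over the absolute maximum instead, assuming NumPy (the helper name peak_normalize is illustrative):

    import numpy as np

    def peak_normalize(audio: np.ndarray) -> np.ndarray:
        # Divide by the largest absolute sample so the result stays within [-1, 1]
        peak = np.max(np.abs(audio))
        if peak == 0:
            return audio  # all-zero signal: nothing to scale
        return audio / peak

    # Usage with synthetic data (no audio file needed for the example)
    signal = np.array([-0.8, 0.2, 0.5, -1.6, 0.4])
    print(peak_normalize(signal))  # largest absolute value becomes 1.0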
Load an audio file and segment it into 10-second increments. Save each segment to the target directory. Append the gender of the speaker and the segment index to the filename. | def segment_audio(filename, y_value, split='train', clf='gender'):
filepath = 'recordings/recordings/' + filename + '.mp3'
audio, sr = librosa.load(filepath, sr=16000)
audio = normalize(audio)
# Add gender label to filename for later processing
sex = y_value
if sex == 'female':
filename = '{}.F'.format(filename)
else: filename = '{}.M'.format(filename)
# Segment audio file
seg_files = segment_10s(audio, sr)
for key, val in seg_files.items():
new_name = '{}.{}'.format(filename, key)
sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_generator(files: list,\n segment_duration: float,\n sampleRate: int,\n db_thr: float or None = None,\n frame_length: int = 512,\n hop_length: int = 128,\n ) -> None:\n\n I = 0\n J = 0\n\n segment = np.zeros((int(segment_duration*sampleRate),))\n\n k = 0\n file_no = 0\n\n while True:\n if I >= len(segment):\n yield segment\n segment = np.zeros((int(segment_duration*sampleRate),))\n I = 0\n\n if k == 0 or J >= len(y):\n J = 0\n y, sr = librosa.core.load(files[file_no], mono=True, sr=sampleRate)\n file_no += 1\n\n if file_no == len(files):\n break\n\n # Normalize\n y = y/y.max()\n\n # Remix non-silent segments\n if db_thr is not None:\n # Figure out intervals of non-silence (NOTE: Is the threshold right? -- 60db quiet)\n intervals = librosa.effects.split(y, frame_length=frame_length, hop_length=hop_length, top_db=db_thr)\n\n # Remix according to those intervals\n y = librosa.effects.remix(y, intervals)\n\n if len(segment[I:]) >= len(y[J:]):\n segment[I:I+len(y[J:])] = y[J:]\n I = I + len(y[J:])\n J = J + len(y[J:])\n else:\n segment[I:] = y[J:J+len(segment[I:])]\n J = J + len(segment[I:])\n I = I + len(segment[I:])\n k += 1",
"def process(filename, debug_mode=False):\n if debug_mode:\n global DO_REPORT\n DO_REPORT = debug_mode\n\n try:\n signal = preprocessing(filename)\n except BaseException as e:\n print(e)\n sys.exit()\n\n labels, num_of_speakers = diarization(signal)\n segments = lab2seg(labels)\n res_filename = create_csv(filename, segments)\n return res_filename, num_of_speakers",
"def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None, save_format='numpy',\n global_mean_male=None, global_mean_female=None,\n global_std_male=None, global_std_female=None,\n dtype=np.float32):\n if not is_training:\n if global_mean_male is None or global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance', 'no']:\n raise ValueError(\n 'normalize must be \"utterance\" or \"speaker\" or \"global\" or \"no\".')\n if tool not in ['htk', 'python_speech_features', 'librosa']:\n raise TypeError(\n 'tool must be \"htk\" or \"python_speech_features\"' +\n ' or \"librosa\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: 講演ごとに異なるspeakerとみなす\n\n # Loop 1: Computing global mean and statistics\n if is_training and normalize != 'no':\n print('=====> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_utt_sum, speaker_mean, _, total_frame_num_speaker = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_utt_sum.shape[0]\n global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n global_std_male = np.zeros((feature_dim,), dtype=dtype)\n global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n global_mean_male += input_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n global_mean_female += input_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: speaker mean is already computed\n\n print('=====> Computing global mean & stddev...')\n # Compute global mean per gender\n global_mean_male /= total_frame_num_male\n global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_utt in input_data_dict_speaker.values():\n global_std_male += np.sum(\n np.abs(input_utt - global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_utt in input_data_dict_speaker.values():\n global_std_female += np.sum(\n np.abs(input_utt - global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n global_std_male = np.sqrt(\n global_std_male / (total_frame_num_male - 1))\n global_std_female = np.sqrt(\n global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'global_mean_male.npy'),\n global_mean_male)\n np.save(join(save_path, 'global_mean_female.npy'),\n 
global_mean_female)\n np.save(join(save_path, 'global_std_male.npy'),\n global_std_male)\n np.save(join(save_path, 'global_std_female.npy'),\n global_std_female)\n\n # Loop 2: Normalization and Saving\n print('=====> Normalization...')\n frame_num_dict = {}\n sampPeriod, parmKind = None, None\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_utt in input_data_dict_speaker.items():\n\n if normalize == 'no':\n pass\n elif normalize == 'global' or not is_training:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_utt -= global_mean_male\n input_utt /= global_std_male\n elif speaker[3] == 'F':\n input_utt -= global_mean_female\n input_utt /= global_std_female\n else:\n raise ValueError\n elif normalize == 'speaker':\n # Normalize by mean & std per speaker\n input_utt = (input_utt - speaker_mean) / speaker_std\n elif normalize == 'utterance':\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_utt, axis=0, dtype=dtype)\n input_utt = (input_utt - utt_mean) / utt_std\n else:\n raise ValueError\n\n frame_num_dict[speaker + '_' + utt_index] = input_utt.shape[0]\n\n if save_path is not None:\n # Save input features\n if save_format == 'numpy':\n input_data_save_path = mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_utt)\n elif save_format == 'htk':\n if sampPeriod is None:\n _, sampPeriod, parmKind = read(audio_path)\n write(input_utt,\n htk_path=mkdir_join(\n save_path, speaker, speaker + '_' + utt_index + '.htk'),\n sampPeriod=sampPeriod,\n parmKind=parmKind)\n else:\n raise ValueError('save_format is numpy or htk.')\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (global_mean_male, global_mean_female,\n global_std_male, global_std_female, frame_num_dict)",
"def read_audio(audio_paths, speaker_dict, tool, config, normalize, is_training,\n save_path=None,\n train_global_mean_male=None, train_global_mean_female=None,\n train_global_std_male=None, train_global_std_female=None,\n dtype=np.float64):\n if not is_training:\n if train_global_mean_male is None or train_global_mean_female is None:\n raise ValueError('Set mean & std computed in the training set.')\n if normalize not in ['global', 'speaker', 'utterance']:\n raise ValueError('normalize is \"utterance\" or \"speaker\" or \"global\".')\n\n audio_path_list_male, audio_path_list_female = [], []\n total_frame_num_male, total_frame_num_female = 0, 0\n total_frame_num_dict = {}\n speaker_mean_dict = {}\n\n # NOTE: speaker norm は講演ごとの正規化とする\n # 講演間の話者関係がわからないから\n\n # Loop 1: Computing global mean and statistics\n if is_training:\n print('===> Reading audio files...')\n for i, audio_path in enumerate(tqdm(audio_paths)):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio file into utterances\n _, input_data_utt_sum, speaker_mean, _, total_frame_num_speaker = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n if i == 0:\n # Initialize global statistics\n feature_dim = input_data_utt_sum.shape[0]\n train_global_mean_male = np.zeros((feature_dim,), dtype=dtype)\n train_global_mean_female = np.zeros(\n (feature_dim,), dtype=dtype)\n train_global_std_male = np.zeros((feature_dim,), dtype=dtype)\n train_global_std_female = np.zeros((feature_dim,), dtype=dtype)\n\n # For computing global mean\n if speaker[3] == 'M':\n audio_path_list_male.append(audio_path)\n train_global_mean_male += input_data_utt_sum\n total_frame_num_male += total_frame_num_speaker\n elif speaker[3] == 'F':\n audio_path_list_female.append(audio_path)\n train_global_mean_female += input_data_utt_sum\n total_frame_num_female += total_frame_num_speaker\n else:\n raise ValueError\n\n # For computing speaker stddev\n if normalize == 'speaker':\n speaker_mean_dict[speaker] = speaker_mean\n total_frame_num_dict[speaker] = total_frame_num_speaker\n # NOTE: すでに話者平均は計算できている\n\n print('===> Computing global mean & stddev...')\n # Compute global mean per gender\n train_global_mean_male /= total_frame_num_male\n train_global_mean_female /= total_frame_num_female\n\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, _, _, _ = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=True,\n sil_duration=0,\n tool=tool,\n config=config)\n\n # For computing global stddev\n if speaker[3] == 'M':\n for input_data_utt in input_data_dict_speaker.values():\n train_global_std_male += np.sum(\n np.abs(input_data_utt - train_global_mean_male) ** 2, axis=0)\n elif speaker[3] == 'F':\n for input_data_utt in input_data_dict_speaker.values():\n train_global_std_female += np.sum(\n np.abs(input_data_utt - train_global_mean_female) ** 2, axis=0)\n else:\n raise ValueError\n\n # Compute global stddev per gender\n train_global_std_male = np.sqrt(\n train_global_std_male / (total_frame_num_male - 1))\n train_global_std_female = np.sqrt(\n train_global_std_female / (total_frame_num_female - 1))\n\n if save_path is not None:\n # Save global mean & std per gender\n np.save(join(save_path, 'train_global_mean_male.npy'),\n train_global_mean_male)\n np.save(join(save_path, 'train_global_mean_female.npy'),\n train_global_mean_female)\n 
np.save(join(save_path, 'train_global_std_male.npy'),\n train_global_std_male)\n np.save(join(save_path, 'train_global_std_female.npy'),\n train_global_std_female)\n\n # Loop 2: Normalization and Saving\n print('===> Normalization...')\n frame_num_dict = {}\n for audio_path in tqdm(audio_paths):\n speaker = basename(audio_path).split('.')[0]\n\n if normalize == 'speaker' and is_training:\n speaker_mean = speaker_mean_dict[speaker]\n else:\n speaker_mean = None\n\n # Divide each audio into utterances\n input_data_dict_speaker, _, speaker_mean, speaker_std, _ = segment_htk(\n audio_path,\n speaker,\n speaker_dict[speaker],\n is_training=is_training,\n sil_duration=0,\n tool=tool,\n config=config,\n mean=speaker_mean) # for compute speaker sttdev\n # NOTE: input_data_dict_speaker have been not normalized yet\n\n for utt_index, input_data_utt in input_data_dict_speaker.items():\n\n if normalize == 'utterance' and is_training:\n # Normalize by mean & std per utterance\n utt_mean = np.mean(input_data_utt, axis=0, dtype=dtype)\n utt_std = np.std(input_data_utt, axis=0, dtype=dtype)\n input_data_utt = (input_data_utt - utt_mean) / utt_std\n\n elif normalize == 'speaker' and is_training:\n # Normalize by mean & std per speaker\n input_data_utt = (input_data_utt - speaker_mean) / speaker_std\n\n else:\n # Normalize by mean & std over the training set per gender\n if speaker[3] == 'M':\n input_data_utt -= train_global_mean_male\n input_data_utt /= train_global_std_male\n elif speaker[3] == 'F':\n input_data_utt -= train_global_mean_female\n input_data_utt /= train_global_std_female\n else:\n raise ValueError\n\n if save_path is not None:\n # Save input features\n input_data_save_path = mkdir_join(\n save_path, speaker + '_' + utt_index + '.npy')\n np.save(input_data_save_path, input_data_utt)\n frame_num_dict[speaker + '_' +\n utt_index] = input_data_utt.shape[0]\n\n if save_path is not None:\n # Save the frame number dictionary\n with open(join(save_path, 'frame_num.pickle'), 'wb') as f:\n pickle.dump(frame_num_dict, f)\n\n return (train_global_mean_male, train_global_mean_female,\n train_global_std_male, train_global_std_female)",
"def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)",
"def generate_seg_file(self, filename):\n self._generate_a_seg_file(filename, self.wave[:-4])",
"def segment(sound_file, spec_file, ms_step, pix_per_s, sound_output_dir, spec_output_dir):\n pix_per_ms = pix_per_s/1000\n sound = AudioSegment.from_wav(sound_file)\n start, stop = 0, ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms\n spec = Image.open(spec_file)\n chopping = True\n while stop <= len(sound):\n \n # Split sound\n chunk = sound[start:stop]\n chunk.export(sound_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".wav\", format=\"wav\")\n\n # Split spectrogram\n w, h = spec.size\n cropped_spec = spec.crop((start_pixel, 0, stop_pixel, h))\n cropped_spec.save(spec_output_dir + sound_file.split(\"/\")[-1].split(\".\")[0] + \"_\" + str(start) + \"-\" + str(stop) + \".png\")\n\n start += ms_step\n stop += ms_step\n start_pixel, stop_pixel = start*pix_per_ms, stop*pix_per_ms",
"def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)",
"def diarization(self):\n self._status = 1\n if self._single:\n try:\n os.mkdir(self.get_file_basename())\n except OSError, err:\n if err.errno != 17:\n raise err\n fm._silence_segmentation(self._basename)\n fm._gender_detection(self._basename)\n segname = self._basename + '.seg'\n f_seg = open(segname, 'r')\n headers = []\n values = []\n differ = False\n basic = None\n gen = {'M': 0, 'F': 0, 'U': 0}\n for line in f_seg.readlines():\n if line.startswith(';;'):\n headers.append(line[line.index('['):])\n else:\n a_line = line.split(' ')\n if basic == None:\n basic = a_line[4]\n if a_line[4] != basic:\n differ = True\n gen[a_line[4]] += int(a_line[3])\n values.append(a_line)\n header = \";; cluster:S0 %s\" % headers[0]\n from operator import itemgetter\n index = 0\n while index < len(values):\n values[index][2] = int(values[index][2])\n index += 1\n values = sorted(values, key=itemgetter(2))\n index = 0\n while index < len(values):\n values[index][2] = str(values[index][2])\n index += 1\n newfile = open(segname + '.tmp', 'w')\n newfile.write(header)\n if differ: #in case the gender of the single segments differ \n# then set the prevailing\n# print 'transgender :-D'\n if gen[ 'M' ] > gen[ 'F' ]:\n basic = 'M'\n elif gen[ 'M' ] < gen[ 'F' ] :\n basic = 'F'\n else:\n basic = 'U'\n\n for line in values:\n line[4] = basic #same gender for all segs\n newfile.write(' '.join(line[:-1]) + ' S0\\n')\n f_seg.close()\n newfile.close()\n shutil.copy(self.get_file_basename() + '.wav',\n os.path.join(self.get_file_basename(), 'S0' + '.wav'))\n shutil.move(segname + '.tmp', segname)\n shutil.copy(self.get_file_basename() + '.seg',\n os.path.join(self.get_file_basename(), 'S0' + '.seg'))\n utils.ensure_file_exists(segname)\n else:\n# print str(self._diar_conf[0])\n# print str(self._diar_conf[1])\n fm.diarization(self._basename, str(self._diar_conf[0]),\n str(self._diar_conf[1]))\n self._status = 2",
"def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)",
"def split_multiple_recordings_file(file_path, min_silence_duration=0.25, noise_threshold=150):\n print(file_path)\n rate, audio = scipy.io.wavfile.read(file_path)\n split_recordings = split_multiple_recordings(audio, min_silence_duration=min_silence_duration,\n noise_threshold=noise_threshold, sample_rate_hz=rate)\n\n if file_path.count('.') != 1:\n raise Exception('File_path must contain exactly one period, usually in extension. IE: /home/test.wav')\n\n for idx, recording in enumerate(split_recordings):\n print(\"spliting \" + file_path)\n new_file_path = file_path.split('.')[0] + '_' + str(idx) + \".wav\"\n scipy.io.wavfile.write(new_file_path, rate, recording)",
"def slice_recording(path_recording, path_metadata_filepath_duration):\n\n metadata_filepath_duration = open(path_metadata_filepath_duration, 'r')\n\n start = 0.0\n\n for line in metadata_filepath_duration:\n filepath, duration = line.split(\" | \")\n target_filepath = re.sub('/Mixtures/', '/mic_recordings/Mixtures/', filepath)\n target_parentpath = re.sub('/mixture.wav', '', target_filepath)\n\n # creating folder if the folder doesnot exist\n try:\n os.makedirs(target_parentpath)\n except OSERROR as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(target_parentpath):\n pass\n\n delta_t = float(duration)\n\n # calling ffmpeg to slice the wav file into its respective sizes\n subprocess.call([\"ffmpeg\", \"-i\", path_recording, \"-ss\", str(start), \"-t\", str(delta_t), \"-acodec\", \"copy\", target_filepath])\n\n # resetting the start for next file in line\n start += delta_t\n\n metadata_filepath_duration.close()",
"def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text",
"def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))",
"def segment_10s(audio, sr):\n seg_files = {}\n n_seg = int((len(audio)/sr)/10)\n for i in range(n_seg):\n segment = audio[10*i*sr:(i+1)*10*sr]\n seg_files[i] = segment\n return seg_files",
"def write_audio_segment(self, data):\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.wav'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name",
"def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()",
"def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)",
"def make_profiles(datafolder, profilefolder, size):\n files = os.listdir(datafolder) \n for file in files:\n languagename = file.split(\"-\")[0]\n encodering = file.split(\"-\")[1]\n bestand = open('training/' + file,'r' , encoding=encodering) #Reads with the correct encoding.\n test = langdetect.trigram_table(bestand.read(), size) #Creates a ngram table of the content of the file.\n filename = languagename + '.' + str(size) + '.txt' #Creates a new filename.\n newfile = open('trigram-models/' + filename, 'w', encoding=\"utf-8\") \n langdetect.write_trigrams(test, 'trigram-models/' + filename) #Creates a new file with the ngrams and their frequency.\n newfile.close()",
"def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data",
"def split_on_silence_threshold(wav_file, dest_dir):\n # Read the file\n audioSegment = AudioSegment.from_wav(wav_file)\n # Calculating the silence threshold\n # Normalizing the audio file belfore finding the threshold\n full_audio_wav = normalize(audioSegment)\n loudness_ms_list = [] # Save the audio levels of all the chunks\n for ms_chunk in full_audio_wav:\n loudness_ms_list.append(round(ms_chunk.dBFS))\n print(\"Audio levels are recorded\", file=sys.stderr)\n # Using pandas df for easier manipulation\n df = pd.DataFrame(loudness_ms_list)\n df[0] = df[df[0] != float(\"-inf\")] # Remove the very low levels\n st = df[0].mean()\n st = st if st < -16 else -16 # Because -16db is default\n # Splits the audio if silence duration is MSL long\n MSL = 500 # minimum silence length in ms\n chunks = split_on_silence(\n full_audio_wav, \n # split on silences longer than 500ms (500ms)\n min_silence_len=MSL, \n # anything under -16 dBFS is considered silence\n silence_thresh=st, \n # keep 200 ms of leading/trailing silence\n keep_silence=200, \n )\n # Saving all the chunks\n print(\"Writing all the files, this may take some time!\", file=sys.stderr)\n for index, chunk in enumerate(chunks):\n chunk_file_name = os.path.join(dest_dir, \"sample_{}.wav\".format(str(index).zfill(10)))\n print(\"Saving the file to \" + chunk_file_name, file=sys.stderr)\n # You can export as mp3 etc, note that it has dependency on ffmpeg\n chunk.export(chunk_file_name, format=\"wav\")",
"def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')",
"def save_segmentation_samples(self, dest=\"./Datasets/IsophonicsSegmentation.seg\", song_indices=[0, 10, 20, 30, 40, 50, 60, 70], hop_length=512, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, n_frames=500):\n data = []\n chords = []\n gold_targets = []\n # Iterate over all song indices on the input\n for song_ind in song_indices:\n # Prprocess audio\n preprocessed_audio = IsophonicsDataset.preprocess_audio(\n waveform=self.DATA[song_ind].WAVEFORM,\n sample_rate=self.DATA[song_ind].SAMPLE_RATE,\n spectrogram_generator=spectrogram_generator,\n nfft=self.NFFT, hop_length=hop_length,\n norm_to_C=norm_to_C, key=self.KEYS[song_ind].get_first_key()\n ).swapaxes(0,1)\n\n num_samples, _ = preprocessed_audio.shape\n\n # Convert data and chord targets to sequences\n data_in_seqs, targets_in_seqs = Dataset.songs_to_sequences(\n FEATURESs=[preprocessed_audio],\n CHORDs=[self.CHORDS[song_ind]],\n TIME_BINSs=[[float(i)/(float(self.SAMPLE_RATE) / float(hop_length)) for i in range(num_samples)]],\n KEYs=self.KEYS[song_ind].get_first_key(),\n n_frames=n_frames,\n norm_to_C=norm_to_C\n )\n\n # Add song's sequences to lists as a new element\n data.append(data_in_seqs)\n chords.append(targets_in_seqs)\n gold_targets.append(SegmentationCRNN.labels2changes(targets = chords[-1]))\n\n # Save all three np arrays generated in this function .. data, chords, gold_targets aka chord changes\n with lzma.open(dest, \"wb\") as dataset_file:\n pickle.dump((data, chords, gold_targets), dataset_file)\n\n print(\"[INFO] The Isophonics segmentation samples was saved successfully.\")",
"def compress_segments(map_, wav_id, file_path, segments, outpath):\n try:\n audio = AudioSegment.from_wav(file_path)\n #print(\"\\nSegments:\", len(segments))\n for _, row in segments.iterrows():\n start = row[2] * 1000\n end = row[3] * 1000\n audio_chunk = audio[start:end]\n save_path = \"{}/{}_chunk_{}_{}.wav\".format(outpath, wav_id, start, end)\n audio_chunk.export(save_path, format='wav')\n compress_file(map_=map_, \n name=row[0],\n save_path=save_path)\n except Exception as e:\n print(\"ERR:\",e)\n print(\"Failed files:\", file_path)",
"def create_sample_files(bam_file_name, fractions):\n\n print(\"running sample retreival: \\n\\n\")\n\n sample_folder = \"./%s_sample_stats/\" % bam_file_name.replace(\n \"_mRNA.bam\", \"\")\n\n if not os.path.exists(sample_folder):\n os.mkdir(sample_folder)\n os.chdir(sample_folder)\n else:\n os.chdir(sample_folder)\n print(\"fractions: \", fractions)\n for decimal in fractions:\n print(\"%s: started\" % decimal)\n file_var = \"%s_sample.bam\" % str(decimal).replace(\"0.\", \"\")\n\n # extract sample\n print(bam_file_name, file_var)\n sample_cmd = \"samtools view ../%s -s %s -b -@12 > %s\" % (\n bam_file_name, '{:f}'.format(decimal), file_var)\n print(sample_cmd)\n os.system(sample_cmd)",
"def save(self, fname, master_volume=1.):\n \n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n )\n\n # normalisation for conversion to int32 bitdepth wav\n norm = master_volume * (pow(2, 31)-1) / vmax\n\n # setup array to house wav stream data \n chans = np.zeros((self.out_channels['0'].values.size,\n len(self.out_channels)), dtype=\"int32\")\n \n # normalise and collect channels into a list\n for c in range(len(self.out_channels)):\n vals = self.out_channels[str(c)].values\n chans[:,c] = (vals*norm).astype(\"int32\")\n \n # finally combine and write out wav file\n wavfile.write(fname, self.samprate, chans)\n print(f\"Saved {fname}\")",
"def save_to_file(\n sources,\n codec='wav', audio_adapter=ffmpeg.FFMPEGProcessAudioAdapter(),\n bitrate='128k', synchronous=True):\n\n # filename = \"chengdu.mp3\"\n pool = Pool()\n tasks = []\n for instrument, data in sources.items():\n path = \"./out/\"+instrument + \".\" + codec\n\n if pool:\n task = pool.apply_async(audio_adapter.save, (\n path,\n data,\n 44100,\n codec,\n bitrate))\n tasks.append(task)\n else:\n audio_adapter.save(path, data, 44100, codec, bitrate)\n if synchronous and pool:\n while len(tasks) > 0:\n task = tasks.pop()\n task.get()\n task.wait(timeout=200)",
"def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])",
"def save_stereo(self, fname, master_volume=1.):\n\n if len(self.out_channels) > 2:\n print(\"Warning: sonification has > 2 channels, only first 2 will be used. See 'save_combined' method.\")\n \n # first pass - find max amplitude value to normalise output\n # and concatenate channels to list\n vmax = 0.\n channels = []\n for c in range(min(len(self.out_channels), 2)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n channels.append(self.out_channels[str(c)].values)\n \n wav.write(fname, \n np.column_stack(channels),\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n \n print(\"Saved.\")",
"def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()"
] | [
"0.5985446",
"0.5802684",
"0.57816744",
"0.5737059",
"0.5693634",
"0.5681921",
"0.55937463",
"0.5582633",
"0.5564124",
"0.55507296",
"0.55322385",
"0.5512459",
"0.55056393",
"0.5496371",
"0.54751974",
"0.5456971",
"0.5410829",
"0.5401392",
"0.53417194",
"0.5321298",
"0.53151065",
"0.5279356",
"0.5265418",
"0.5257917",
"0.5230108",
"0.5228325",
"0.5216217",
"0.5215454",
"0.52043396",
"0.52029663"
] | 0.70655805 | 0 |
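The segment_audio document above delegates the actual slicing to a segment_10s helper (one of the candidate snippets lists a matching implementation). A self-contained sketch of cutting a signal into non-overlapping 10-second chunks and writing each to disk, assuming NumPy and soundfile; the chunk_{i}.wav naming is illustrative rather than the dataset's gender-and-index scheme:

    import numpy as np
    import soundfile as sf

    def segment_10s(audio: np.ndarray, sr: int) -> dict:
        # Number of complete 10-second windows that fit in the signal
        n_seg = len(audio) // (10 * sr)
        return {i: audio[i * 10 * sr:(i + 1) * 10 * sr] for i in range(n_seg)}

    # Example: write each chunk of a 35-second synthetic tone to its own file
    sr = 16000
    tone = np.sin(2 * np.pi * 440 * np.arange(35 * sr) / sr).astype(np.float32)
    for idx, chunk in segment_10s(tone, sr).items():
        sf.write(f"chunk_{idx}.wav", chunk, sr)  # yields chunk_0.wav .. chunk_2.wav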
Load an audio file (or segment). Add random noise to the file and save it under a new filename. | def noisy_data(filename, split='train', clf='gender'):
filepath = 'data/{}/{}/{}o.wav'.format(clf, split, filename)
audio, sr = librosa.load(filepath, sr=16000)
# Add noise
noisy = add_noise(audio)
    # Write noisy audio to file
sf.write('data/{}/{}/{}n.wav'.format(clf, split, filename), noisy, sr)
#print("Noise added to {}".format(filename)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")",
"def transform_audio(self, segment: Union[AudioSegment, SpeechSegment]) -> None:\n noise_data = self._rng.sample(self._noise_data, 1)[0]\n if noise_data[\"duration\"] < segment.duration:\n raise RuntimeError(\"The duration of sampled noise audio is smaller than the audio segment.\")\n diff_duration = noise_data[\"duration\"] - segment.duration\n start = self._rng.uniform(0, diff_duration)\n end = start + segment.duration\n noise_seg = AudioSegment.from_slice_file(noise_data[\"src\"], start=start, end=end)\n snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)\n segment.add_noise(noise_seg, snr_dB=snr_dB, allow_downsampling=True, rng=self._rng)",
"def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15),\n gain_range=(-6, 8)):\n low_tempo, high_tempo = tempo_range\n tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)\n low_gain, high_gain = gain_range\n gain_value = np.random.uniform(low=low_gain, high=high_gain)\n audio = augment_audio_with_sox(path=path, sample_rate=sample_rate,\n tempo=tempo_value, gain=gain_value)\n return audio",
"def __save(self,audio):\n self.__openSampleFile()\n self.__sampleFile.writeframes(audio)",
"def trim_silence_file(file_path, noise_threshold=150):\n rate, audio = scipy.io.wavfile.read(file_path)\n trimmed_audio = trim_silence(audio, noise_threshold=noise_threshold)\n print()\n scipy.io.wavfile.write(file_path, rate, trimmed_audio)",
"def audio_file_save(folder_path, current_time, data, name_by_date):\r\n\r\n name_by_time = current_time + '.wav' #timestamp for the audio file name\r\n usage = disk_usage(folder_path)\r\n if usage.used / usage.total < args.storage_threshold:\r\n file_path = os.path.join(folder_path, name_by_time)\r\n\r\n if args.resampling:\r\n sampling_rate = args.resampling_rate\r\n audio = audio_resampling(data)\r\n else:\r\n sampling_rate = args.recording_samplerate\r\n audio = data\r\n\r\n sf.write(file_path , audio, sampling_rate)\r\n\r\n else:\r\n name = os.path.join(folder_path, name_by_date + '.txt')\r\n f = open(name, 'a')\r\n f.write(current_time + '\\t Activity Detected \\n')\r\n f.close()",
"def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))",
"def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])",
"def save_sample(file_path, sampling_rate, audio):\n audio = (audio.numpy() * 32768).astype(\"int16\")\n write(file_path, sampling_rate, audio)",
"def random_sample(input_name):\n\t#Count number of lines in original file\n\twith open(input_name) as f:\n\t\told_size = len(f.readlines())\n\t#Determine number of lines for new file\n\tnew_size=int(round(sum(1 for row in open(input_name))* args.rnd_sample))\n\t#Create name for sub-sampled file\n\tSampledFileName, SampledExten = os.path.splitext(input_name)\n\tSampledName = '%s_smpld%s' % (SampledFileName,SampledExten)\n\t#Randomly select the desired number of lines and print to new file\n\twith open(SampledName,\"wb\") as sink:\n\t\tfor i in random.sample(range(0, old_size), new_size):\n\t\t\tsink.write(linecache.getline(input_name, i))\n\tlinecache.clearcache()",
"def write_audio_to_file(audio: torch.Tensor, sample_id: str = ''):\n global FS_HZ\n assert FS_HZ is not None\n audio_extension = '.wav'\n audio_path = upload_directory + 'sample' + sample_id + audio_extension\n audio_np = audio.cpu().numpy()\n with open(audio_path, 'wb') as f:\n soundfile.write(f,\n audio_np,\n samplerate=FS_HZ)\n return audio_path",
"def load_sample(filename):\n return open(os.path.join(SAMPLES, filename)).read()",
"def write_audio_segment(self, data):\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.wav'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name",
"def segment_audio(filename, y_value, split='train', clf='gender'):\n\n filepath = 'recordings/recordings/' + filename + '.mp3'\n audio, sr = librosa.load(filepath, sr=16000)\n audio = normalize(audio)\n\n # Add gender label to filename for later processing\n sex = y_value\n if sex == 'female':\n filename = '{}.F'.format(filename)\n else: filename = '{}.M'.format(filename)\n\n # Segment audio file\n seg_files = segment_10s(audio, sr)\n\n for key, val in seg_files.items():\n new_name = '{}.{}'.format(filename, key)\n sf.write('data/{}/{}/{}o.wav'.format(clf, split, new_name), val, sr)",
"def __newSampleFile(self):\n self.__newFileName()\n self.__sampleFile = wav.open(self.__fileName, self.OPEN_MODE)\n self.__sampleFile.setnchannels(NUM_CHANNELS)\n self.__sampleFile.setsampwidth(self.__audio.get_sample_size(self.FORMAT))\n self.__sampleFile.setframerate(FS)",
"def save_sound(filename,sound,sample_freq,num_channels):\n #open a wave file in write ('w') mode, this will create the file\n file=wave.open(filename,'w')\n #set the framerate aka sample frequency\n file.setframerate(sample_freq)\n #set the number of the channels\n file.setnchannels(num_channels)\n #the size of the one sample in bytes\n file.setsampwidth(2)\n #write the actual sound to the file, notice the call to get_raw\n file.writeframesraw(sound.get_raw())\n file.close()",
"def remove_file(path, save):\n if not save:\n os.remove(path)\n print \"[crawler] removing audio file...\"",
"def record_audio_to_file(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()",
"def generate_seg_file(self, filename):\n self._generate_a_seg_file(filename, self.wave[:-4])",
"def save(cls, audiobook, file_name):\n os.unlink(file_name)",
"def snip(filename,s,e,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n st=int(s*44100)\n en=int(e*44100)\n data_s=data[st:en,:]\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_snipped.wav',data_s,sr,'PCM_16')\n print('Done!')\n return data_s",
"def load_wav_to_torch(self, full_path):\n data, sampling_rate = load(full_path, sr=self.sampling_rate)\n data = 0.95 * normalize(data)\n\n if self.augment:\n amplitude = np.random.uniform(low=0.3, high=1.0)\n data = data * amplitude\n\n return torch.from_numpy(data).float(), sampling_rate",
"def augment_audio_with_sox(path, sample_rate, tempo, gain):\n try:\n with NamedTemporaryFile(suffix=\".wav\") as augmented_file:\n augmented_filename = augmented_file.name\n sox_augment_params = [\"tempo\", \"{:.3f}\".format(tempo), \"gain\", \"{:.3f}\".format(gain)]\n sox_params = \"sox \\\"{}\\\" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1\".format(path, sample_rate,\n augmented_filename,\n \" \".join(sox_augment_params))\n os.system(sox_params)\n y = load_audio(augmented_filename)\n except Exception as E:\n y = load_audio(path)\n return y",
"def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None",
"def make_music_rand():\n pass",
"def create_noise_data_from_original():\n # Get the noise values we'll choose from.\n noiseValues = AudioDataOriginal.query.filter(\n AudioDataOriginal.datetime >= '2017-06-14 07:26:24',\n AudioDataOriginal.datetime <= '2017-06-14 07:27:54',\n ).all()\n # Add a noise value to each record with a `processedValue`, as these are the\n # only ones used in the model generation later on.\n audioSamples = AudioDataOriginal.query.filter(\n AudioDataOriginal.datetime > '2017-06-14 07:27:54',\n AudioDataOriginal.processedValue.isnot(None)\n ).all()\n for sample in audioSamples:\n noiseRecord = random.choice(noiseValues)\n sample.noiseValue = noiseRecord.audio\n db.session.commit()",
"def sample_sentences_from_file(file, fraction):\n with open(file, 'r') as f:\n lines = f.readlines()\n new_file_size = ceil(fraction*len(lines))\n rand_lines = sample(lines, new_file_size)\n new_file = file+\"_sampled-\"+str(new_file_size)+\".txt\"\n with open(new_file, 'w') as f:\n f.writelines(rand_lines)\n return new_file",
"def play_audio(filename):\n os.system(AUDIOPLAYER + ' ' + filename)",
"def normalize_audio(audio_path: str, output_path: str, name: str):\n sound = AudioSegment.from_file(audio_path + os.sep + name + '.wav',\n \"wav\")\n change_in_d_bfs = (-20.0) - sound.dBFS\n sound = sound.apply_gain(change_in_d_bfs)\n sound.export(output_path + os.sep + name + '.wav', format=\"wav\")",
"def create_noise_dataset(cfg):\n here = os.path.dirname(__file__)\n basedir = os.path.join(here, cfg['data.mix_background_noise.audio_dir'])\n audio_files = find_files(basedir,\n cfg['data.mix_background_noise.audio_regexp'])\n sample_rate = cfg['data.sample_rate']\n audios = [audio.WavFile(fn, sample_rate=sample_rate)\n for fn in tqdm.tqdm(audio_files, 'Reading noise',\n ascii=bool(cfg['tqdm.ascii']))]\n segment_files = [os.path.splitext(fn)[0] + '.csv' for fn in audio_files]\n segments = [read_noise_csv(fn, sample_rate, len(wav))\n if os.path.exists(fn)\n else [(0, len(wav))]\n for fn, wav in zip(segment_files, audios)]\n return NoiseDataset(audios, segments,\n min_length=sample_rate * cfg['data.len_min'])"
] | [
"0.62971425",
"0.6159043",
"0.5984587",
"0.59020734",
"0.58086365",
"0.5675305",
"0.56475496",
"0.5606833",
"0.559785",
"0.5565294",
"0.54593736",
"0.54581594",
"0.5443855",
"0.5441127",
"0.54234785",
"0.542045",
"0.5408668",
"0.53995633",
"0.53893155",
"0.5381253",
"0.53792244",
"0.53454196",
"0.5334369",
"0.53066516",
"0.53053373",
"0.52917856",
"0.5278346",
"0.5245606",
"0.5235608",
"0.5198715"
] | 0.689111 | 0 |
Release a lock on the bus | def bus_release(self):
self._bus_lock.release() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def release_lock():\r\n get_lock.n_lock -= 1\r\n assert get_lock.n_lock >= 0\r\n # Only really release lock once all lock requests have ended.\r\n if get_lock.lock_is_enabled and get_lock.n_lock == 0:\r\n get_lock.start_time = None\r\n get_lock.unlocker.unlock()",
"def release_lock(self):\n if self.lock:\n self.lock.release()",
"def release_lock(self):\n self._multistore._unlock()",
"def unlock(lock):\n lock.release()",
"def release_lock (self):\n\n self.connection.commit ()\n self.locked = False",
"def release(self):\n fcntl.flock(self.lock_file, fcntl.LOCK_UN)",
"def release(self, o):\n if not self.available(o):\n raise ValueError('you do not own this lock')\n self._owner = None",
"def release(self, bay_uuid):\n # Only the conductor that owns the lock will be releasing it.\n result = objects.BayLock.release(bay_uuid, self.conductor_id)\n if result is True:\n LOG.warn(_LW(\"Lock was already released on bay %s!\"), bay_uuid)\n else:\n LOG.debug(\"Conductor %(conductor)s released lock on bay \"\n \"%(bay)s\" % {'conductor': self.conductor_id,\n 'bay': bay_uuid})",
"def _release(self):\n try:\n os.unlink(self.lockfile)\n\n # Log success.\n logging.info(\"Released lock at \" + self.lockfile + \"...\")\n except:\n # Ignore all errors.\n pass",
"def release_lock():\n lock_file = get_lock_file()\n if exists(lock_file):\n LOG.info('Removing lock file %r' % lock_file)\n os.unlink(lock_file)\n else:\n LOG.warning('Lock file %r did not exist.' % lock_file)",
"def release(self):\r\n if self.is_locked:\r\n os.close(self.fd)\r\n os.unlink(self.lockfile)\r\n self.is_locked = False",
"def release(self):\n self.is_locked = False\n os.unlink(self.lockfile)",
"def release(self):\n if self.is_locked:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False",
"def ReleaseLock(self, lock_data=None):\n if self._acquired_lock is not None:\n if lock_data is not None:\n lock_data = LockData(lock_data)\n\n self._acquired_lock.expire_time = datetime.datetime.min # Force expire.\n self._acquired_lock.lock_data = lock_data\n self._acquired_lock.put()\n else:\n self._acquired_lock.delete()",
"def __del__(self):\n if self.is_locked:\n self.release()",
"def unlock (self):\n fcntl.flock(self._lockHandle, fcntl.LOCK_UN)\n self._lockHandle.close()",
"def unlock(self):\n\n\t\t# Release the file lock first\n\t\tfcntl.lockf(self.lockfile, fcntl.LOCK_UN)\n\t\t# Release the thread lock\n\t\tself.s.release()",
"def write_release(self):\n self.is_locked = False\n self.rwlock = RWLock().write_release()",
"def unlock(self):\n self.mtx.release()",
"def release_node(self, node):\n # use the lua script to release the lock in a safe way\n try:\n node._release_script(keys=[self.resource], args=[self.lock_key])\n except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError):\n pass",
"def _release_imp(self):\n\n self._logger.debug(\n 'Release Lock', lock_name=self._lock_name, caler=self._holder)\n\n try:\n self._dynamodb_wrapper.put_item(\n self._table_name,\n {\n mutex_consts.MutexDynamoConfig.lock.value: self._lock_name,\n mutex_consts.MutexDynamoConfig.holder.value: mutex_consts.NO_HOLDER_DATA,\n mutex_consts.MutexDynamoConfig.ttl.value: 0,\n },\n dynamodb_condition.Condition.is_equal(mutex_consts.MutexDynamoConfig.holder.value, mutex_consts.NO_HOLDER_DATA) |\n dynamodb_condition.Condition.is_equal(mutex_consts.MutexDynamoConfig.holder.value, self._holder) |\n dynamodb_condition.Condition.not_exists(mutex_consts.MutexDynamoConfig.lock.value))\n\n except (dynamodb_exceptions.PutItemConditionException, dynamodb_exceptions.PutItemException):\n self._logger.log_and_raise(\n mutex_exceptions.MutexReleaseFailedException, self._lock_name, self._holder, str(self._ttl))",
"def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False",
"def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False",
"def un_lock(self):\n self._un_lock()",
"def api_release(self):\n\n self._api_release_lock_with_timer()",
"def _release_lock(self, job_info):\n os.remove(self.lock_file)\n self.logger.debug(\"lock release for '%s'\" % job_info)",
"def release_lock(self, lockname, identifier):\n conn = self.conn\n pipe = conn.pipeline(True)\n lockname = \"lock:\" + lockname\n\n while True:\n try:\n pipe.watch(lockname)\n cur_id = pipe.get(lockname)\n if cur_id and cur_id.decode(\"utf-8\") == identifier:\n pipe.multi()\n pipe.delete(lockname)\n pipe.execute()\n return True\n\n pipe.unwatch()\n break\n\n except self.__redis_mod.exceptions.WatchError:\n pass\n\n return False",
"def release_lock(self):\n senlin_lock.node_lock_release(self.entity.id, self.id)\n\n # only release cluster lock if it was locked as part of this\n # action (i.e. it's a user intiated action aka CAUSE_RPC from\n # senlin API and a not a CAUSED_DERIVED)\n if self.cause == consts.CAUSE_RPC:\n senlin_lock.cluster_lock_release(self.entity.cluster_id, self.id,\n senlin_lock.NODE_SCOPE)\n return self.RES_OK",
"def release_named_lock(self, name):\r\n self.log.debug(\"Releasing named lock (%s)\" % name)\r\n self._named_locks[name].release()",
"def unlock_clock(self):\n self.sem.release()"
] | [
"0.7924762",
"0.7834396",
"0.77969474",
"0.77904904",
"0.74435604",
"0.74268526",
"0.73679256",
"0.7322436",
"0.7123128",
"0.7118452",
"0.71132535",
"0.709967",
"0.7089947",
"0.70288163",
"0.7005624",
"0.699237",
"0.6928809",
"0.6910996",
"0.6896251",
"0.687286",
"0.6866911",
"0.68550605",
"0.68550605",
"0.68473834",
"0.68452495",
"0.6841358",
"0.680688",
"0.6780136",
"0.67540735",
"0.67366713"
] | 0.8140664 | 0 |
Decorator to be used in api methods to serve the swagger documentation for this api. | def api_documentation(api: str, summary: str, in_model: BaseModel,
out_model: BaseModel, out_description: str) -> Callable:
for model, name in ((in_model, 'Input'), (out_model, 'Output')):
doc.Object(
make_dataclass(
f'Api{api[1:].title()}{name}',
[(key, val.type_, val.type_)
for key, val in model.__dict__['__fields__'].items()]))
im_returns = doc.JsonBody({
key: val.type_
for key, val in in_model.__dict__['__fields__'].items()
})
om_returns = {
key: val.type_
for key, val in out_model.__dict__['__fields__'].items()
}
def decorator(func):
@doc.summary(summary)
@doc.response(412,
'Error: Precondition Failed',
description='The passed request-parameters are invalid')
@doc.response(500,
'Error: Server-Error occured',
description='An internal error occured')
@doc.consumes(im_returns,
content_type='application/json',
location='body')
@doc.produces(om_returns,
content_type='application/json',
description=out_description)
@wraps(func)
async def function_wrapper(request, *args, **kwargs):
return await func(request=request, *args, **kwargs)
return function_wrapper
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index():\n definition = {\n \"swagger\": \"2.0\",\n \"info\": {\n \"title\": flask.current_app.config.get(\"APPNAME\", \"Not specified\"),\n \"version\": flask.current_app.config.get(\"VERSION\", \"Not specified\"),\n },\n \"host\": request.host,\n \"schemes\": [\"http\"],\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n \"definitions\": registry._definitions,\n \"paths\": {}\n }\n\n rules = list(flask.current_app.url_map.iter_rules())\n for r in sorted(rules, key=operator.attrgetter('rule')):\n if r.rule.startswith('/static'):\n continue\n if r.endpoint in registry._skipped:\n continue\n\n rule = re.sub(r\"<(?:[_a-zA-Z0-9\\(\\)]+:)?([a-zA-Z0-9_]+)>\", r\"{\\1}\", r.rule)\n if rule not in definition['paths']:\n definition['paths'][rule] = {}\n\n methods_handled = r.methods & REST_METHODS\n handler = flask.current_app.view_functions.get(r.endpoint)\n doc = handler.func_doc\n\n if len(methods_handled) == 1:\n method = methods_handled.pop().lower()\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule][method] = validated\n except Exception:\n pass\n\n else:\n # We need to handle multi-method docstrings differently\n # because the documentation needs to define both, and\n # it's a higher level of the swagger hierarchy\n try:\n validated = yaml.safe_load(doc)\n if not isinstance(validated, dict):\n raise Exception(\"Not a descriptor\")\n definition['paths'][rule].update(validated)\n except Exception:\n definition['paths'][rule] = {}\n\n resp = flask.make_response(\n json.dumps(definition, for_json=True))\n resp.headers.set(\"Content-type\", 'application/json')\n resp.headers.set(\"Access-Control-Allow-Origin\", \"*\")\n return resp",
"def apiDocs():\n\treturn render_template('apiDocs.html')",
"def swagger_redirect(request: HttpRequest) -> HttpResponse:\n return HttpResponse('Use /api/v2/docs/ instead', status=410)",
"def swagger():\n return jsonify(current_app.spec.to_dict())",
"def describe(self, *args, **kwargs):\n def _autodoc(func, *_args, **_kwargs):\n if len(_args) > 0:\n #: Instance or class method.\n response = func(_args[0])\n else:\n #: Function.\n if len(_kwargs) > 0:\n response = func(**_kwargs)\n else:\n response = func()\n\n self.parse(args[0], response)\n\n return func\n\n return decorator(_autodoc)",
"def get_documentation(self, *args, **dargs):\n pass",
"def swagger_definition(self, base_path=None, **kwargs):\n return Swagger(\n {\n \"info\": Info(\n {\n key: kwargs.get(key, self.DEFAULT_INFO.get(key))\n for key in Info.fields.keys()\n if key in kwargs or key in self.DEFAULT_INFO\n }\n ),\n \"paths\": self.paths,\n \"swagger\": \"2.0\",\n \"basePath\": base_path,\n }\n ).to_primitive()",
"def documentation_only():\n pass",
"def DeveloperAPI(*args, **kwargs):\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n return DeveloperAPI()(args[0])\n\n def wrap(obj):\n _append_doc(obj, message='DeveloperAPI: This API may change across minor Ludwig releases.')\n _mark_annotated(obj)\n return obj\n return wrap",
"async def handle_doc(self, request: web.Request) -> web.Response:\n spec = request.app[\"spec\"]\n spec_url = request.app.router[\"openapi_spec\"].url_for()\n title = spec.info.title\n html = f\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>{title}</title>\n <!-- needed for adaptive design -->\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n \"\"\"\n if self.font:\n html += f\"\"\"\n <link href=\"https://fonts.googleapis.com/css?{self.font}\" rel=\"stylesheet\">\n \"\"\"\n html += f\"\"\"\n <link rel=\"shortcut icon\" href=\"{self.favicon_url}\">\n <!--\n ReDoc doesn't change outer page styles\n -->\n <style>\n body {{\n margin: 0;\n padding: 0;\n }}\n </style>\n </head>\n <body>\n <redoc spec-url=\"{spec_url}\"></redoc>\n <script src=\"{self.redoc_js_url}\"> </script>\n </body>\n </html>\n \"\"\"\n return web.Response(text=html, content_type=\"text/html\")",
"def api():\n return send_file('templates/bootstrapper.swagger.json')",
"def main():\n\n return redirect('/apidocs')",
"def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])",
"def overview():\n return render_template('api/api.html', title='API Overview')",
"def api_index():\n func_list = {}\n for rule in app.url_map.iter_rules():\n if rule.endpoint != 'static':\n func_list[rule.rule] = app.view_functions[rule.endpoint].__doc__\n return jsonify(func_list)",
"def documentation():\n return render_template('help.html')",
"def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = CustomSchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request, public=True)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()",
"def test_swagger(self):\n response = self.client.get(\"/api/v1/swagger\", query_string=dict(validate_schema=True))\n assert_that(response.status_code, is_(equal_to(200)))\n swagger = loads(response.get_data().decode(\"utf-8\"))\n # we have the swagger docs endpoint too, which is implemented as a query.\n # ignore it here for now.\n del swagger[\"paths\"][\"/swagger/docs\"]\n assert_that(swagger[\"paths\"], is_(equal_to({\n \"/foo/get\": {\n \"get\": {\n \"description\": \"My doc string\",\n \"tags\": [\"foo\"],\n \"responses\": {\n \"default\": {\n \"description\": \"An error occurred\", \"schema\": {\n \"$ref\": \"#/definitions/Error\",\n }\n },\n \"200\": {\n \"description\": \"My doc string\",\n \"schema\": {\n \"$ref\": \"#/definitions/QueryResult\",\n }\n }\n },\n \"parameters\": [\n {\n \"in\": \"header\",\n \"name\": \"X-Response-Skip-Null\",\n \"required\": False,\n \"type\": \"string\",\n \"description\": \"Remove fields with null values from the response.\"\n },\n {\n \"required\": False,\n \"type\": \"string\",\n \"name\": \"optional_value\",\n \"in\": \"query\",\n },\n {\n \"required\": True,\n \"type\": \"string\",\n \"name\": \"required_value\",\n \"in\": \"query\",\n },\n ],\n \"operationId\": \"query\",\n }\n }\n })))",
"def api(self) -> str:",
"def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n if not __name__ == cls.__module__:\n # e.g.: cls.__module__ = mpcontribs.api.projects.views\n views_path = cls.__module__.split(\".\")\n doc_path = \".\".join(views_path[:-1] + [\"document\"])\n cls.tags = [views_path[-2]]\n doc_filepath = doc_path.replace(\".\", os.sep) + \".py\"\n if os.path.exists(doc_filepath):\n cls.doc_name = cls.tags[0].capitalize()\n Model = getattr(import_module(doc_path), cls.doc_name)\n cls.schema_name = cls.doc_name + \"Schema\"\n cls.Schema = type(\n cls.schema_name,\n (ModelSchema, object),\n {\n \"Meta\": type(\n \"Meta\",\n (object,),\n dict(model=Model, ordered=True, model_build_obj=False),\n )\n },\n )\n cls.definitions = {cls.schema_name: schema2jsonschema(cls.Schema)}\n cls.resource.schema = cls.Schema\n\n # write flask-mongorest swagger specs\n for method in cls.methods:\n spec = get_specs(cls, method, cls.tags[0])\n if spec:\n dir_path = os.path.join(DOC_DIR, cls.tags[0])\n file_path = os.path.join(dir_path, method.__name__ + \".yml\")\n if not os.path.exists(file_path):\n os.makedirs(dir_path, exist_ok=True)\n\n if is_gunicorn:\n with open(file_path, \"w\") as f:\n yaml.dump(spec, f)\n logger.debug(\n f\"{cls.tags[0]}.{method.__name__} written to {file_path}\"\n )",
"def use_in_api_documentation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"use_in_api_documentation\")",
"def documentation():\n return auto.html()",
"def get_swagger_view(title=None, url=None, patterns=None, urlconf=None):\n class SwaggerSchemaView(APIView):\n _ignore_model_permissions = True\n exclude_from_schema = True\n permission_classes = [AllowAny]\n renderer_classes = [\n CoreJSONRenderer,\n renderers.OpenAPIRenderer,\n renderers.SwaggerUIRenderer\n ]\n\n def get(self, request):\n generator = SchemaGenerator(\n title=title,\n url=url,\n patterns=patterns,\n urlconf=urlconf\n )\n schema = generator.get_schema(request=request)\n\n if not schema:\n raise exceptions.ValidationError(\n 'The schema generator did not return a schema Document'\n )\n\n return Response(schema)\n\n return SwaggerSchemaView.as_view()",
"def __call__(self, func):\n func.__doc__ = self.doc\n return func",
"def wrapper(*args, **kwargs):\n print(f\"you are about to call {fn.__name__}\")\n print(f\"Here's the documentation: {fn.__doc__}\")\n return fn(*args, **kwargs)",
"def _rapidoc(request: HttpRequest) -> HttpResponse:\n return render(request, 'rapidoc.html', {\n 'schema': reverse('api:v2:schema'),\n })",
"def make_doc():\n doc_app = Flask(__name__)\n doc_app.register_blueprint(blueprint(no_doc=False))\n return doc_app",
"def view(self, **options: Any) -> Callable:\n\n def decorator(f):\n rule = \"/\"\n endpoint = options.pop(\"endpoint\", f.__name__)\n self.add_url_rule(rule, endpoint, f, **options)\n return f\n\n return decorator",
"def generate_apidoc_patches(self):\n base_path = self.paths[\"api_doc_dir\"]\n from django_swagger_utils.core.utils.mk_dirs import MkDirs\n MkDirs().mk_dir_if_not_exits(file_name=base_path + \"/\")\n\n from django_swagger_utils.apidoc_gen.generators.patch_generator import PatchGenerator\n\n patch_generator = PatchGenerator(self.app_name, self.parser, self.paths, base_path)\n # generating api docs\n patch_generator.generate_json_patch()",
"def get_documentation():\n return send_file(base_dir / \"static/documentation.html\", \"text/html; charset=UTF-8\")"
] | [
"0.7308593",
"0.67804843",
"0.67267275",
"0.6568308",
"0.6500626",
"0.6489433",
"0.64708114",
"0.64436257",
"0.63863075",
"0.6336464",
"0.6331381",
"0.6279588",
"0.6140457",
"0.6124011",
"0.6117498",
"0.6093394",
"0.60924906",
"0.60902476",
"0.60646194",
"0.6054501",
"0.6027014",
"0.60211873",
"0.6004288",
"0.5938166",
"0.59238946",
"0.59083325",
"0.5893017",
"0.5888402",
"0.5886886",
"0.5872275"
] | 0.6823912 | 1 |
Decorator to be used in api methods to convert the request data to an instance of the passed `model`. This instance is passed to the decorated api endpoint as the parameter `service_params`. | def api_inputmodel(api: str, model: BaseModel, servicename: str,
service_logger: logger) -> Callable:
def decorator(func):
@wraps(func)
async def function_wrapper(request, *args, **kwargs):
try:
service_params = model.parse_raw(request.body)
except ValidationError as err:
msg = (f'API: {api} - invalid params ({request.json}) passed '
f'to {servicename}: {err}')
service_logger.warning(msg)
raise PreconditionFailed(msg, status_code=412)
result = await func(request=request,
service_params=service_params,
service_logger=service_logger,
*args,
**kwargs)
return result
return function_wrapper
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def api_outputmodel(api: str, model: BaseModel, servicename: str,\n service_logger: logger) -> Callable:\n\n def decorator(func):\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n service_result = await func(request, *args, **kwargs)\n try:\n if isinstance(service_result, model):\n result = service_result\n else:\n result = model(**service_result)\n output = response.json(result.dict())\n except Exception as err:\n msg = ('an internal error occured (service: '\n f'{servicename}, api: {api}): {err}')\n raise ServerError(msg)\n service_logger.info(f'processed result {result} => '\n f'{output.content_type} [{output.status}] '\n f'{output.body}')\n return output\n\n return function_wrapper\n\n return decorator",
"def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)",
"def model_to_instance(model):\n pass",
"def forward(self, *args: Any, **kwargs: Any) -> Any:\n return self.model(*args, **kwargs)",
"def param_converter(*decorator_args, **decorator_kwargs):\n def wrapped(fn):\n @wraps(fn)\n def decorated(*view_args, **view_kwargs):\n if Model is not None:\n view_kwargs = _convert_models(view_kwargs, decorator_kwargs)\n view_kwargs = _convert_query_params(view_kwargs, decorator_kwargs)\n return fn(*view_args, **view_kwargs)\n return decorated\n\n if decorator_args and callable(decorator_args[0]):\n return wrapped(decorator_args[0])\n return wrapped",
"def convert_to_model(self, *args):\n services_data, *_ = args\n return [Service(**service) for service in services_data]",
"def apply_model(model: BaseModel, **kwargs):\n raise NotImplementedError(f'Unknown model: {model}')",
"def __call__(self, x, **kwargs):\n return self.model(x)",
"def to_payload(self, model):\n return model",
"def model(self, key, model_type:T, default=undefined, description=None, **kwargs) -> T:\n return self._process(key, description=description, default=default, cast=cast_pydantic(model_type),type=model_type, **kwargs)",
"def _get_model(\n self,\n model: t.Type[api.ModelMixins],\n start: bool = True,\n auth: t.Optional[AuthModel] = None,\n ) -> t.Any:\n if start:\n self.start()\n\n if model in self.API_CACHE:\n return self.API_CACHE[model]\n\n if not isinstance(auth, AuthModel):\n auth = self.AUTH\n\n self.API_CACHE[model] = model(auth=auth, log_level=self.API_LOG_LEVEL)\n return self.API_CACHE[model]",
"def decorator(cls):\n\n instance = cls(*args, **kwargs)\n serializer_services.register_serializer(instance, **kwargs)\n\n return cls",
"def convert_to_model(self, *args):",
"def from_model(model):\n ret = model2json(model)\n return JsonBody(ret['body'])",
"def api_method(func):\n @wraps(func)\n def decorator(self, return_request_args=False, *args, **kwargs):\n request_args = func(self, *args, **kwargs)\n request_args.update({\n 'method': '{module}.{method}'.format(\n module=self.__class__.__name__,\n method=func.__name__)})\n request_args = self._preprocess(request_args)\n if return_request_args:\n return request_args\n else:\n return self.pa.request(**request_args)\n return decorator",
"def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)",
"def get_model_instance(model,baseclass=None,nvarparams=1,**kwargs):\n if isinstance(model,ParametricModel if baseclass is None else baseclass):\n for k,v in kwargs.iteritems():\n setattr(model,k,v)\n return model\n else:\n cls = get_model_class(model,baseclass)\n args = (nvarparams,) if cls.isVarnumModel() else tuple()\n return cls(*args,**kwargs)",
"def to_api_data(self):\n raise NotImplementedError()",
"def api_documentation(api: str, summary: str, in_model: BaseModel,\n out_model: BaseModel, out_description: str) -> Callable:\n for model, name in ((in_model, 'Input'), (out_model, 'Output')):\n doc.Object(\n make_dataclass(\n f'Api{api[1:].title()}{name}',\n [(key, val.type_, val.type_)\n for key, val in model.__dict__['__fields__'].items()]))\n im_returns = doc.JsonBody({\n key: val.type_\n for key, val in in_model.__dict__['__fields__'].items()\n })\n\n om_returns = {\n key: val.type_\n for key, val in out_model.__dict__['__fields__'].items()\n }\n\n def decorator(func):\n @doc.summary(summary)\n @doc.response(412,\n 'Error: Precondition Failed',\n description='The passed request-parameters are invalid')\n @doc.response(500,\n 'Error: Server-Error occured',\n description='An internal error occured')\n @doc.consumes(im_returns,\n content_type='application/json',\n location='body')\n @doc.produces(om_returns,\n content_type='application/json',\n description=out_description)\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n return await func(request=request, *args, **kwargs)\n\n return function_wrapper\n\n return decorator",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def update(self, datastore, model, **kwargs):\n for k, v in self._preprocess_params(kwargs).items():\n setattr(model, k, v)\n self.save(datastore, model)\n return model",
"def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)",
"def to_model(self, payload):\n if self.skip:\n raise SkipField\n\n model = self.get_or_initialize_model(payload)\n model = self.update_model_fields(model, payload)\n return model",
"def json_to_model(cls, data):\n m = cls.to_model(data)\n m.raw = data\n cls._unlock_unmarshalling(m)\n cls.set_additional_fields(m, data)\n return m",
"def model(self) -> Type[Model]:",
"def _create_response_model(self, data):\n pass",
"def update(self, model, **kwargs):\n self._isinstance(model)\n for k, v in self._preprocess_params(kwargs).items():\n setattr(model, k, v)\n self.save(model)\n return model",
"def __init__(self, endpoint_name, sagemaker_session=None,\n serializer=json_serializer,\n deserializer=json_deserializer,\n content_type=None,\n model_name=None,\n model_version=None):\n super(Predictor, self).__init__(endpoint_name, sagemaker_session, serializer,\n deserializer, content_type)\n\n attributes = []\n if model_name:\n attributes.append('tfs-model-name={}'.format(model_name))\n if model_version:\n attributes.append('tfs-model-version={}'.format(model_version))\n self._model_attributes = ','.join(attributes) if attributes else None",
"def proxy(self, modelcls):\n return ModelProxy(self, modelcls)"
] | [
"0.6261429",
"0.61969465",
"0.5975331",
"0.56145257",
"0.5604642",
"0.55971265",
"0.558913",
"0.5562619",
"0.54409605",
"0.5427529",
"0.5384895",
"0.53138745",
"0.531021",
"0.5299018",
"0.52693087",
"0.52421254",
"0.5187899",
"0.5182848",
"0.51826674",
"0.5159831",
"0.5159831",
"0.5132555",
"0.51257706",
"0.51168",
"0.5110996",
"0.5110925",
"0.5107068",
"0.5103126",
"0.5091716",
"0.5048244"
] | 0.7151013 | 0 |
Decorator to be used in api methods to convert the response data of the decorated api method to JSON based on the passed `model`. | def api_outputmodel(api: str, model: BaseModel, servicename: str,
service_logger: logger) -> Callable:
def decorator(func):
@wraps(func)
async def function_wrapper(request, *args, **kwargs):
service_result = await func(request, *args, **kwargs)
try:
if isinstance(service_result, model):
result = service_result
else:
result = model(**service_result)
output = response.json(result.dict())
except Exception as err:
msg = ('an internal error occured (service: '
f'{servicename}, api: {api}): {err}')
raise ServerError(msg)
service_logger.info(f'processed result {result} => '
f'{output.content_type} [{output.status}] '
f'{output.body}')
return output
return function_wrapper
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def json_response(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tdata = func(*args, **kwargs)\n\t\tdata = json.dumps(data)\n\t\tresponse = make_response(data)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\treturn decorated_view",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.GET:\n data = '%s(%s);' % (request.GET['callback'], data)\n except:\n data = simplejson.dumps(str(objects))\n if 'just_the_json_plz' in kwargs:\n return data\n if 'just_the_data_plz' in kwargs:\n return objects\n if 'callback' in request.GET or 'callback' in request.POST:\n #jsonp\n return HttpResponse(data, \"text/javascript\")\n else:\n #json\n return HttpResponse(data, \"application/json\")\n return decorator",
"def as_json(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n json_response = _as_json(func(*args, **kwargs))\n if isinstance(json_response, tuple):\n response, code = json_response\n if isinstance(response, GenericYetiError):\n return jsonify({response.type: response.message}), code\n return jsonify(response), code\n return jsonify(json_response)\n return inner",
"def from_model(model):\n ret = model2json(model)\n return JsonBody(ret['body'])",
"def json_response(func):\n def wrapper(*args, **kwargs):\n try:\n ret_val = func(*args, **kwargs)\n if isinstance(ret_val, dict):\n result = {\"code\": 0, \"msg\": \"\", \"data\": ret_val}\n return JsonResponse(result)\n else:\n result = {\"code\": -20002, \"msg\": u\"视图函数返回值类型必须是字典\"}\n return JsonResponse(result)\n\n except Exception as err:\n logger.exception(\"func name: %s, error: %s\" % (func.__name__, err))\n result = {\"code\": -20001, \"msg\": str(err)}\n return JsonResponse(result)\n return wrapper",
"def handle_model_request(model_name):\n if model_name in app.models:\n return (\n json.dumps(app.models[model_name][\"data\"]), 200,\n {'ContentType': 'application/json'}\n )\n else:\n return _respond_not_found()",
"def response_json(func):\n\n @wraps(func)\n def set_response(*args, **kwargs):\n res = func(*args, **kwargs)\n if type(res) is not dict:\n return res\n else:\n return Response(json.dumps(res), content_type=\"application/json; charset=utf-8\")\n return set_response",
"def jsonify(function):\n @wraps(function)\n def inner(*args, **kwargs):\n \"\"\"\n This docstring will be overridden by @wraps decorator.\n \"\"\"\n return Response(\n dumps(function(*args, **kwargs)),\n mimetype='application/json'\n )\n return inner",
"def jsonify(func):\n\n @functools.wraps(func)\n def convert(*args, **kwargs):\n\n success = True\n code = 200 # default status code - success!\n\n try:\n result = func(*args, **kwargs)\n\n if isinstance(result, BaseResponse):\n return result\n\n except exc.HTTPException as ex:\n # i'd like to be able to just re-raise e here, but the body of the\n # response is e.get_body() instead of e.description - so we have to\n # just set up the response ourselves\n result = { 'message' : ex.description }\n code = ex.code\n\n except Exception as ex:\n result = { 'message' : 'Internal Server Error', 'system_message' : ex.message }\n code = 500\n\n # build a response object, and change the content type header to json\n response = make_response(json.dumps(result))\n response.headers['Content-Type'] = 'application/json'\n response.status_code = code\n\n return response\n\n # return the function that is taking the place of (or masquerading as) our decorated function\n return convert",
"def create_json_from_model(self):\n json = {\n \"enableAutoReply\": self.enable_auto_reply,\n \"responseSubject\": self.response_subject,\n \"responseBodyPlainText\": self.response_body_plain_text,\n \"responseBodyHtml\": self.response_body_html,\n \"restrictToContacts\": self.restrict_to_contacts,\n \"restrictToDomain\": self.restrict_to_domain,\n \"startTime\": self.start_time,\n \"endTime\": self.end_time\n }\n return json",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = json.dumps(objects, default=json_serialize)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except Exception as e:\n print (e)\n data = json.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator",
"def json_friendly(self):",
"def json_response(func):\n\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = simplejson.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n\n return decorator",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = simplejson.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator",
"def json_decorator(f):\n def decorator(*args, **kwargs):\n return jsonify(f(*args, **kwargs))\n return decorator",
"def jsonify(obj):\n raise NotImplementedError",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n\n data = json.dumps(objects)\n if 'callback' in request:\n # a jsonp response!\n data = '%s(%s);' % (request['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n\n return HttpResponse(data, \"application/json\")\n return decorator",
"def jsonify(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n result = f(*args, **kwargs)\n data = json.dumps(result, indent=None if request.is_xhr else 2)\n return app.response_class(data, mimetype='application/json')\n return decorated_function",
"def jsonify(func, *args, **kwargs): \n adict = func(*args, **kwargs)\n if not isinstance(adict, dict):\n return adict\n \n \n #: getting updates from session and database\n \n updates = list(session['callback_updates']) \n updates.extend(models.CallbackUpdate.dump())\n \n if updates:\n if not adict.get('type') == 'composite':\n adict = beans._wrap('composite', [adict]) \n \n adict['result'].extend(updates)\n \n json = simplejson.dumps(adict)\n response = make_response(json) \n response.headers['Content-Type'] = 'application/json'\n session['callback_updates'] = []\n db.session.commit() \n return response",
"def response_json(func):\n def wrapper(request):\n try:\n return get_json_response(func(request))\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper",
"def json(data, *args, **kwargs):\n return HttpResponseBehaviour(JsonResponse, data, *args, **kwargs)",
"def json_service(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n results = f(*args, **kwargs)\n if results is None:\n results = {}\n if not isinstance(results, dict):\n results = {'data': results}\n if 'success' not in results:\n results['success'] = True\n return jsonify(results)\n except Exception as e:\n print \"error in: \", f.__name__\n print traceback.print_exc()\n return jsonify({'success': False, 'error': str(e)})\n\n return decorated_function",
"def get_json_response(obj):\n return HttpResponse(json.dumps(obj))",
"def get_json_response(obj):\n return HttpResponse(json.dumps(obj))",
"def model_to_json(model: Base) -> Dict[str, Any]:\n json = {}\n for col in model.__mapper__.attrs.keys(): # type: ignore\n if col != \"hashed_password\" and col != \"salt\":\n if col in datetime_cols:\n # Cast datetime object to string\n json[col] = str(getattr(model, col))\n else:\n json[col] = getattr(model, col)\n return json",
"def model_json(request, method, object_id):\n model_object = ImageSelector.objects.filter(id=object_id)\n methods = ['get_prev_front_sibling', 'get_next_front_sibling', 'get_next_cutted_siblings',\n 'get_prev_cutted_siblings', 'get_next_cutted_front_siblings',\n 'get_prev_cutted_front_siblings']\n if len(model_object) == 0 or method not in methods:\n response_dic = {'object': 'not found!'}\n else:\n model_object = model_object[0]\n if method == 'get_prev_front_sibling':\n response_data = [model_object.get_prev_front_sibling()]\n elif method == 'get_next_front_sibling':\n response_data = [model_object.get_next_front_sibling()]\n elif method == 'get_next_cutted_siblings':\n response_data = model_object.get_next_cutted_siblings()\n elif method == 'get_prev_cutted_siblings':\n response_data = model_object.get_prev_cutted_siblings()\n elif method == 'get_next_cutted_front_siblings':\n response_data = model_object.get_next_cutted_front_siblings()\n elif method == 'get_prev_cutted_front_siblings':\n response_data = model_object.get_prev_cutted_front_siblings()\n\n response_dic = []\n if response_data:\n for response in response_data:\n if response.main_image:\n image_id = response.main_image.id\n response_dic.append({'page_id': response.id, 'image_id': image_id})\n\n response_data = json.dumps(response_dic)\n # return an HttpResponse with the JSON and the correct MIME type\n return HttpResponse(response_data, content_type='application/json')",
"def __json_call(self, method, data):\n headers = self.headers\n headers['Content-type'] = 'application/json'\n data = json.dumps(data)\n return self.__call(headers, method, data)",
"def handle_models_request():\n # TODO: add sort and filter by creation/modification date\n return (\n json.dumps({\"models\": {\n k: d[\"data\"] for k, d in app.models.items()\n }}), 200,\n {'ContentType': 'application/json'}\n )",
"def wrap_response(f):\n @wraps(f)\n def wrapped_f(*args, **kwargs):\n resp = f(*args, **kwargs)\n if isinstance(resp, Response):\n return resp\n elif isinstance(resp, list):\n return jsonify({'data': resp})\n elif hasattr(resp,'to_json'):\n return jsonify(resp.to_json())\n else:\n return jsonify(resp)\n return wrapped_f",
"def json_response(obj):\n return HttpResponse(json.dumps(obj), content_type=\"application/json\")"
] | [
"0.6303589",
"0.6233411",
"0.6158551",
"0.61566204",
"0.61502767",
"0.6142237",
"0.6134618",
"0.60121006",
"0.60005003",
"0.5990344",
"0.59364825",
"0.59224707",
"0.59192204",
"0.5905445",
"0.58905154",
"0.58634555",
"0.5848181",
"0.5831524",
"0.58291614",
"0.58150035",
"0.5784024",
"0.57303464",
"0.5703129",
"0.5703129",
"0.5699096",
"0.5698261",
"0.566989",
"0.56070447",
"0.55946916",
"0.5593708"
] | 0.6607163 | 0 |
Attempt to update the branch to the given SHA. | def update_branch(self, name, sha):
branch_info = {
'sha': sha,
}
resp = self.patch('git/refs/heads/{}'.format(name), json=branch_info)
try:
resp.raise_for_status()
except Exception:
logger.error(resp.json())
raise
return resp.json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)",
"def update(self, branch=None):\n if branch is None:\n branch = self.branch\n\n print \"*** Updating to branch '%s'\" % branch\n commands.pull(ui.ui(), self._repository, self.url)\n commands.update(ui.ui(), self._repository, None, branch, True)",
"def update(self, rev = 'HEAD'):\r\n self._authsvn('up', ['-r', rev])",
"def update(repository, args, **_):\n _log(repository, 'INFO', \"Going to build commit %s\" % args[2][:7])",
"def reset_branch(ctx, name, sha, hard):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo(\"Updating {} branch...\".format(name), break_line=False)\n gh.reset_branch(name=name, sha=sha, hard=hard)\n log.echo('Branch {} is now at {} '.format(name, sha), break_line=False)\n log.checkmark()\n except BaseException as _:\n log.xmark()\n raise",
"def update():\n call('git -C ~/norminette+ pull', shell=True)",
"def verify_up_to_date(path, branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch up to date:\")\n run_in_component(path, ['git', 'remote', 'update'])\n\n result = run_in_component(path, ['git', 'rev-list', 'HEAD...origin/%s' % branch, '--count'])\n count = int(result.strip())\n\n if count == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You branch is not up-to-date with remote branch: %d different commits\" % count)",
"def update(context, user=get_local_user(), remote=False, instance=None, branch=BRANCH):\n no_stack = None\n no_compose = False\n\n command = f\"git checkout {branch} || git pull && git checkout {branch}\"\n run_command(context, user, remote, instance, no_stack, command, no_compose)\n\n command = \"git pull\"\n run_command(context, user, remote, instance, no_stack, command, no_compose)",
"def update_stable(path, sha_list, origin):\n\n conn = sqlite3.connect(rebasedb)\n c = conn.cursor()\n\n cmd = ['git', '-C', path, 'log', '--no-merges', '--abbrev=12', '--oneline',\n '--reverse', sha_list]\n commits = subprocess.check_output(cmd, encoding='utf-8', errors='ignore')\n\n for commit in commits.splitlines():\n if commit != '':\n elem = commit.split(' ')[:1]\n sha = elem[0]\n c.execute(\"select sha from stable where sha is '%s'\" % sha)\n found = c.fetchall()\n if found == []:\n c.execute('INSERT INTO stable(sha, origin) VALUES (?, ?)', (\n sha,\n origin,\n ))\n\n conn.commit()\n conn.close()",
"def update_from_repo():\n\treturn",
"def update_base_branch(self):\n # Make sure base branch is up to date\n print(\"Checking out base branch '{}'...\".format(self.base_branch))\n self.git.checkout(self.base_branch)\n print('Updating base branch...')\n self.git.pull('--rebase')",
"def sha(self, sha):\n\n self._sha = sha",
"def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)",
"def set_git_sha(context, sha):\n context.sha = sha",
"def update_repo_cli(api_client, repo_id, branch, tag, path):\n id_from_param_or_path = (repo_id if repo_id is not None\n else ReposApi(api_client).get_repo_id(path))\n content = ReposApi(api_client).update(id_from_param_or_path, branch, tag)\n click.echo(pretty_format(content))",
"def pull_nightly_version(spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"merge\", nightly_version]\n p = subprocess.run(cmd, check=True)",
"def push_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n if opts.push:\n vprint (\"Pushing branch\", name)\n ex (\"cd $DOC_ROOT/\" + product + \" && git push origin refs/heads/\" + name,\n allow_fail=True)",
"def update(dest, branch=None, revision=None):\n # If we have a revision, switch to that\n if revision is not None:\n cmd = ['hg', 'update', '-C', '-r', revision]\n run_cmd(cmd, cwd=dest)\n else:\n # Check & switch branch\n local_branch = get_output(['hg', 'branch'], cwd=dest).strip()\n\n cmd = ['hg', 'update', '-C']\n\n # If this is different, checkout the other branch\n if branch and branch != local_branch:\n cmd.append(branch)\n\n run_cmd(cmd, cwd=dest)\n return get_revision(dest)",
"def checkout_nightly_version(branch, spdir):\n nightly_version = _nightly_version(spdir)\n cmd = [\"git\", \"checkout\", \"-b\", branch, nightly_version]\n p = subprocess.run(cmd, check=True)",
"def update_branch(branch, repo, options):\n update = None\n\n remote = repo.get_merge_branch(branch)\n if not remote:\n gbp.log.warn(\"No branch tracking '%s' found - skipping.\" % branch)\n return False\n\n can_fast_forward, up_to_date = repo.is_fast_forward(branch, remote)\n\n if up_to_date: # Great, we're done\n gbp.log.info(\"Branch '%s' is already up to date.\" % branch)\n return True\n\n if can_fast_forward:\n update = 'merge'\n else:\n if options.force == 'merge':\n gbp.log.info(\"Non-fast forwarding '%s' due to --force=merge\" % branch)\n update = 'merge'\n elif options.force == 'clean':\n gbp.log.info(\"Checking out clean copy of '%s' due to --force=clean\" % branch)\n update = 'clean'\n else:\n gbp.log.warn(\"Skipping non-fast forward of '%s' - use --force or \"\n \"update manually\" % branch)\n\n if update:\n gbp.log.info(\"Updating '%s'\" % branch)\n if repo.branch == branch:\n if update == 'merge':\n repo.merge(remote)\n elif update == 'clean':\n # Have to drop our current branch\n tmpbranch = \"_gbptmp-\"+branch\n gbp.log.debug(\"Checking out '%s' to '%s'\" % (remote, tmpbranch))\n repo.create_branch(tmpbranch, remote)\n gbp.log.debug(\"Switching current branch to '%s'\" % (tmpbranch))\n repo.set_branch(tmpbranch)\n gbp.log.debug(\"Dropping branch '%s'\" % branch)\n repo.delete_branch(branch)\n gbp.log.info(\"Renaming branch '%s' to '%s'\" % (tmpbranch, branch))\n repo.rename_branch(tmpbranch, branch)\n else:\n if can_fast_forward or (update == 'clean'):\n sha1 = repo.rev_parse(remote)\n repo.update_ref(\"refs/heads/%s\" % branch, sha1,\n msg=\"gbp: forward %s to %s\" % (branch, remote))\n elif update == 'merge':\n # Merge other branch, if it cannot be fast-forwarded\n current_branch=repo.branch\n repo.set_branch(branch)\n repo.merge(remote)\n repo.set_branch(current_branch)\n\n return (update != None)",
"def update_changelog(package_id: str, base_branch: str, verbose: bool):\n if _update_changelog(package_id, base_branch, verbose, True):\n sys.exit(64)",
"def update_ref(ref, value):\n subprocess.check_call([\"git\", \"update-ref\", ref, value])",
"def sha(location, rev):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git rev-parse --verify {}'.format(rev)\n return subprocess.check_output(cmd, shell=True).strip()",
"def bump_upstream_repos_shas(path):\n filelist = find_yaml_files(path)\n for filename in filelist:\n print(\"Working on %s\" % filename)\n bump_upstream_repos_sha_file(filename)",
"def git_update(path: Path, repo: str, tag: str = None):\n GITEXE = shutil.which(\"git\")\n\n if not GITEXE:\n logging.error(\"Git not available.\")\n return\n\n if path.is_dir():\n subprocess.check_call([GITEXE, \"-C\", str(path), \"pull\"])\n else:\n # shallow clone\n if tag:\n subprocess.check_call(\n [\n GITEXE,\n \"clone\",\n repo,\n \"--depth\",\n \"1\",\n \"--branch\",\n tag,\n \"--single-branch\",\n str(path),\n ]\n )\n else:\n subprocess.check_call([GITEXE, \"clone\", repo, \"--depth\", \"1\", str(path)])",
"def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)",
"def checkout_latest():\n with cd(env.repo_path):\n run('git checkout %(branch)s;' % env)\n run('git pull origin %(branch)s' % env)",
"def commit(self, sha):\r\n return repocommits.RepoCommit(self, sha)",
"def main(branch):\n try:\n # Ensure that we're in a git repository. This command is silent unless\n # you're not actually in a git repository, in which case, you receive a\n # \"Not a git repository\" error message.\n output = subprocess.check_output(['git', 'rev-parse']).decode('utf-8')\n sys.stdout.write(output)\n except subprocess.CalledProcessError:\n # Bail if we're not in a git repository.\n return\n\n # This behavior ensures a better user experience for those that aren't\n # intimately familiar with git.\n ensure_remote_branch_is_tracked(branch)\n\n # Switch to the specified branch and update it.\n subprocess.check_call(['git', 'checkout', '--quiet', branch])\n\n # Pulling is always safe here, because we never commit to this branch.\n subprocess.check_call(['git', 'pull', '--quiet'])\n\n # Checkout the top commit in the branch, effectively going \"untracked.\"\n subprocess.check_call(['git', 'checkout', '--quiet', '%s~0' % branch])\n\n # Clean up the repository of Python cruft. Because we've just switched\n # branches and compiled Python files should not be version controlled,\n # there are likely leftover compiled Python files sitting on disk which may\n # confuse some tools, such as sqlalchemy-migrate.\n subprocess.check_call(['find', '.', '-name', '\"*.pyc\"', '-delete'])\n\n # For the sake of user experience, give some familiar output.\n print('Your branch is up to date with branch \\'origin/%s\\'.' % branch)",
"def bump_version(ctx, branch, semantic):\n\n try:\n log.echo('Bumping version...', break_line=False)\n bump = ctx.obj.github.bump_version(branch=branch, semantic=semantic)\n log.checkmark()\n log.echo('Bumped version from {} to {}'.format(bump.prev_version, bump.next_version))\n except BaseException as _:\n log.xmark()\n raise"
] | [
"0.62394434",
"0.62033707",
"0.58707505",
"0.5823983",
"0.57316476",
"0.5548735",
"0.54924744",
"0.5490744",
"0.54566896",
"0.53568214",
"0.533697",
"0.53313905",
"0.5318119",
"0.5305053",
"0.52909243",
"0.52125496",
"0.5172792",
"0.51444185",
"0.5125727",
"0.5123589",
"0.5117004",
"0.51135176",
"0.5107081",
"0.50763315",
"0.5057646",
"0.5046271",
"0.5030426",
"0.50106573",
"0.49981362",
"0.49795562"
] | 0.70466286 | 0 |
Verify that the release is actually ready to be released. If the release is new (corresponds to a release branch), then we check that the release is merged into master. If we cannot find the release branch, we assume that it is a hotfix and we verify that the major version number matches the latest release. | def check_release_status(self, release_name, release_branch):
logger.debug('GitHubAPI.check_release_status args: {}; {}'.format(
release_name, release_branch)
)
release_version = extract_release_branch_version(release_name)
release_branch_base = build_release_base_name(get_config())
# Assume that this is a new release
# Check if the release branch is merged into master
try:
merge_status = self.compare(
'master',
release_branch
).get('status')
except requests.exceptions.HTTPError as e:
logger.debug('HTTPError: {}'.format(e.message))
if not e.response.status_code == 404:
raise e
else:
# can be one of diverged, ahead, behind, identical according to
# http://stackoverflow.com/a/23969867
if merge_status in ['diverged', 'ahead']:
raise Exception(
'Release must be merged into master before release')
return
# if the release branch does not exist, then we end up here,
# Assume that it is a hotfix
raw_version = self.latest_release().get('name', '')
if raw_version.startswith(release_branch_base):
raw_version = raw_version[len(release_branch_base):]
version = extract_year_week_version(raw_version)
logger.debug(version)
if extract_year_week_version(release_version) != version:
raise Exception(
'New release version does not match the current release, '
'we expected a hotfix.'
)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_release_branch():\n diff_string_config_yml = run_command(\"git diff origin/master .circleci/config.yml\")\n if re.search(r'[+-][ ]+CONTENT_VERSION: \".*', diff_string_config_yml):\n return True\n\n return False",
"def verify_tags(git_ref_target):\n latest_release = github_util.get_latest_release().get('name')\n latest_commit = run('git rev-list -n 1 {}'.format(latest_release)).stdout.rstrip(\"\\r\\n\")\n if not branch_check(latest_release, git_ref_target):\n print('Your branch does not contain the latest production code. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print(\"Branch contains the latest production tag\")\n fork_point = run('git merge-base remotes/origin/master remotes/origin/{}'.format(git_ref_target))\n commits_since_fork = run('git rev-list --branches={} {}^..HEAD'.format(git_ref_target,\n fork_point.stdout.rstrip(\"\\r\\n\")))\n if latest_commit not in commits_since_fork.stdout:\n print('Your branch did not fork directly from the last production tag. \\n\\\n Please recreate it by branching off of release {}.'.format(latest_release))\n exit(1)\n else:\n print('Latest production tag is between the fork point and HEAD')",
"def is_0_release(release: str) -> bool:\n if release == \"current_branch\":\n return False\n version = packaging.version.parse(release)\n return version < packaging.version.Version(\"1.0\")",
"def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"",
"def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())",
"def test_master_versions(self):\n m = self.d.master(4242)\n r = self.d.release(79)\n v = m.versions\n\n self.assertEqual(len(v), 2)\n self.assertTrue(r in v)\n self.assertEqual(r.master, m)\n\n r2 = self.d.release(3329867)\n self.assertTrue(r2.master is None)",
"def is_release():\n return VERSION[-1]",
"def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))",
"def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())",
"def test_release(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Release branch with no newsfragments, all good.\")",
"def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)",
"def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True",
"def check_if_release_version_ok(\n past_releases: list[ReleaseInfo],\n current_release_version: str,\n) -> tuple[str, str | None]:\n previous_release_version = past_releases[0].release_version if past_releases else None\n if current_release_version == \"\":\n if previous_release_version:\n current_release_version = previous_release_version\n else:\n current_release_version = (datetime.today() + timedelta(days=5)).strftime(\"%Y.%m.%d\")\n if previous_release_version:\n if Version(current_release_version) < Version(previous_release_version):\n console.print(\n f\"[red]The release {current_release_version} must be not less than \"\n f\"{previous_release_version} - last release for the package[/]\"\n )\n raise Exception(\"Bad release version\")\n return current_release_version, previous_release_version",
"def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)",
"def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. (3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str",
"def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )",
"def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)",
"def test_release_version_found(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n set_version_from_git_tag(self.project, self.logger)\n self.assertEqual(self.logger.info.call_count, 2)\n self.assertEqual(self.project.version, '1.2.3')",
"def verify_branch(path, expected_branch=\"master\"):\n\n sys.stdout.write(\" - Verifying your branch is %s:\" % expected_branch)\n branch = run_in_component(path, ['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n branch = branch.strip()\n\n if branch == expected_branch:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"You must be on branch %s to release, you are on %s\" % (expected_branch, branch))",
"def check_tag_version(self):\n import subprocess\n\n version = self.get_tag()\n version = version[version.rfind(\"-\") + 1 :]\n\n if robocorp_code.__version__ == version:\n sys.stderr.write(\"Version matches (%s) (exit(0))\\n\" % (version,))\n sys.exit(0)\n else:\n sys.stderr.write(\n \"Version does not match (found in sources: %s != tag: %s) (exit(1))\\n\"\n % (robocorp_code.__version__, version)\n )\n sys.exit(1)",
"def test_release_update_available_CURRENT(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n self.assertRaises(U.RequiredComponentError, self.u.release_update_available, errorsto='exception')",
"def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"",
"def checkCMSSWVersion(self, url = \"https://cmssdt.cern.ch/SDT/cgi-bin\", fileName = \"ReleasesXML?anytype=1\"):\n\n downloader = Downloader(url)\n goodRelease = False\n tagCollectorUrl = url + '/' + fileName\n\n try:\n result = downloader.config(fileName)\n except:\n common.logger.info(\"ERROR: Problem reading file of allowed CMSSW releases.\")\n\n try:\n events = pulldom.parseString(result)\n\n arch = None\n release = None\n relState = None\n for (event, node) in events:\n if event == pulldom.START_ELEMENT:\n if node.tagName == 'architecture':\n arch = node.attributes.getNamedItem('name').nodeValue\n if node.tagName == 'project':\n relState = node.attributes.getNamedItem('state').nodeValue\n if relState == 'Announced':\n release = node.attributes.getNamedItem('label').nodeValue\n if self.executable_arch == arch and self.version == release:\n goodRelease = True\n return goodRelease\n except:\n common.logger.info(\"Problems parsing file of allowed CMSSW releases.\")\n\n if not goodRelease and \\\n not self.cfg_params.get('CMSSW.allow_nonproductioncmssw',0)==\"1\" :\n msg = \"ERROR: %s on %s is not among supported releases listed at \\n %s .\" % (self.version, self.executable_arch, tagCollectorUrl)\n msg += \"\\n If you are sure of what you are doing you can set\"\n msg += \"\\n allow_NonProductionCMSSW = 1\"\n msg += \"\\n in the [CMSSW] section of crab.cfg.\"\n raise CrabException(msg)\n\n return goodRelease",
"def test_release_update_available_NO(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(None, next)",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def test_dev_version_if_dirty(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n # Test `patch` part\n self.get_dev_version('patch')\n self.assertEqual(self.project.version, '1.2.4.dev')\n # Test `minor` part\n self.get_dev_version('minor')\n self.assertEqual(self.project.version, '1.3.0.dev')\n # Test `major` part\n self.get_dev_version('major')\n self.assertEqual(self.project.version, '2.0.0.dev')\n # Test incorrect part\n self.project.set_property('semver_git_tag_increment_part', 'incorrect')\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n (\"Incorrect value for `semver_git_tag_increment_part` property. \"\n \"Has to be in (`major`, `minor`, `patch`), \"\n \"but `incorrect` passed.\") in err_msg)",
"def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def check_release_exists(self, **kwargs):\n\n # List all available releases for logging and debugging purposes\n # These values are not used to actually check if the release is available\n logging.info(f\"Listing available releases since start date ({self.start_date}):\")\n for dt in pendulum.period(pendulum.instance(self.start_date), pendulum.today(\"UTC\")).range(\"years\"):\n response = requests.get(f\"https://api.crossref.org/snapshots/monthly/{dt.year}\")\n soup = BeautifulSoup(response.text)\n hrefs = soup.find_all(\"a\", href=True)\n for href in hrefs:\n logging.info(href[\"href\"])\n\n # Construct the release for the execution date and check if it exists.\n # The release for a given execution_date is added on the 5th day of the following month.\n # E.g. the 2020-05 release is added to the website on 2020-06-05.\n data_interval_start = kwargs[\"data_interval_start\"]\n exists = check_release_exists(data_interval_start, self.api_key)\n assert (\n exists\n ), f\"check_release_exists: release doesn't exist for month {data_interval_start.year}-{data_interval_start.month}, something is wrong and needs investigating.\"\n\n return True"
] | [
"0.69423383",
"0.6784403",
"0.66795766",
"0.6649919",
"0.6507445",
"0.64944094",
"0.63654304",
"0.6350596",
"0.6347604",
"0.63336277",
"0.6305721",
"0.6219666",
"0.62047046",
"0.61802447",
"0.6172858",
"0.612331",
"0.6118963",
"0.6113204",
"0.6088553",
"0.60614514",
"0.60483974",
"0.6042252",
"0.60349816",
"0.5964456",
"0.59581023",
"0.59537",
"0.59341335",
"0.5920642",
"0.5894531",
"0.58798224"
] | 0.73628855 | 0 |
Reads in an audio file and processes it | def process_audio_file(self, file_name):
sig, sr = librosa.load(file_name, mono=True)
return self._extract_function(sig, sr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio",
"def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels",
"def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')",
"def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. \"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x",
"def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio",
"def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))",
"def get_audio():\n\tbuf = None\n\tnum_new_bytes = BUFFER_SIZE // REFRESH_BUFFER_FACTOR\n\twith open(INFILE) as fifo:\n\t\twhile True:\n\t\t\tif buf is None:\n\t\t\t\tbuf = fifo.read(BUFFER_SIZE)\n\t\t\telse:\n\t\t\t\tbuf = buf[num_new_bytes:] + fifo.read(num_new_bytes)\n\t\t\tyield buf",
"def audio(self):\n self.log_string += 'Audio file'\n self._media_processing()",
"def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result",
"def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x",
"def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()",
"def load_and_process_audio(self):\n output_vector = None\n doa = None\n if self.model == \"gcc_cnn\":\n output_vector, doa = self.format_gcc_cnn()\n elif self.model == \"gcc_dsp\":\n output_vector, doa = self.format_gcc_dsp()\n elif self.model == \"raw_cnn\":\n output_vector, doa = self.format_raw_audio_cnn()\n elif self.model == \"raw_resnet\":\n output_vector, doa = self.format_raw_audio_cnn()\n else:\n print(\"Error -> No file found\")\n\n return output_vector, doa",
"def receive_audio(self):\n print(\"got to receive audio\")\n self.receive_audio_socket = self.start_socket(IP, RECEIVE_AUDIO_PORT)\n self.send_chunk(self.my_name.encode(), self.receive_audio_socket)\n print(self.receive_mes(self.receive_audio_socket))\n\n print(\"receive stream made\")\n i = 0\n done = False\n while not done:\n try:\n i += 1\n data = self.receive_audio_socket.recv(CHUNK) # gets audio chunk\n #print(\"got audio chunk number {} of length {}\".format(i, len(data)))\n self.lock.acquire()\n self.voice_stream.write(data) # plays\n self.lock.release()\n # if len(data) == 0:\n # done = True\n #print(\"wrote chunk #{}\".format(i))\n except socket.error as msg:\n print(\"socket failure receive audio: {}\".format(msg))\n done = True\n except KeyboardInterrupt:\n print(\"exception receive audio\")\n done = True\n self.receive_audio_socket.close()\n # stream_receive.close()\n # p_receive.terminate()",
"def run(self):\r\n\r\n p = pyaudio.PyAudio()\r\n\r\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n channels=wf.getnchannels(),\r\n rate=wf.getframerate(),\r\n output=True)\r\n\r\n musicdata = wf.readframes(CHUNK)\r\n\r\n while playing:\r\n if self.streamnum == 1:\r\n stream.write(musicdata)\r\n musicdata = wf.readframes(CHUNK)\r\n else:\r\n stream.write(musicdata)\r\n musicdata = wf2.readframes(CHUNK)\r\n if len(musicdata) < CHUNK or musicdata == '':\r\n if self.streamnum == 1:\r\n self.streamnum = 2\r\n else:\r\n self.streamnum = 1\r\n self.next = False\r\n if self.pause:\r\n while True:\r\n if not playing:\r\n return\r\n elif not self.pause:\r\n break\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()",
"def play(self, context=None):\n\n self.nowPlaying = True\n\n # Open file for reading\n wf = wave.open(self.path + '/' + self.name, 'rb')\n p = pyaudio.PyAudio()\n\n # Open stream for playback\n stream = p.open( format = p.get_format_from_width( wf.getsampwidth() ),\n channels = wf.getnchannels(),\n rate = wf.getframerate(), output = True)\n\n # Read file in chunks of 1024 bytes\n data = wf.readframes(1024)\n\n # Read while there is data left to read\n # If nowPlaying is False, user has clicked Stop\n while data != '' and self.nowPlaying:\n stream.write(data)\n data = wf.readframes(1024)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n self.nowPlaying = False\n\n # Callback to UI to signal that audio has finished playing\n if context is not None:\n context.stopAudio()",
"def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file",
"def play_audio(filename):\n chunk = 1024\n wf = wave.open(filename, 'rb')\n pa = pyaudio.PyAudio()\n stream = pa.open(\n format=pa.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True\n )\n data_stream = wf.readframes(chunk)\n while data_stream:\n stream.write(data_stream)\n data_stream = wf.readframes(chunk)\n stream.close()\n pa.terminate()",
"def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)",
"def play_audio(file: str) -> None:\n pygame.mixer.init()\n pygame.mixer.music.load(file)\n pygame.mixer.music.play()\n\n while pygame.mixer.music.get_busy():\n continue",
"def play(self):\n\n try:\n if self.source is None:\n # If there is no source-file, write the data to a temporary WAV-file ...\n tmpFile = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)\n tmpFile.close()\n self.write_wav(tmpFile.name)\n \n # ... and play that file\n if sys.platform=='win32':\n winsound.PlaySound(tmpFile.name, winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', tmpFile.name]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(tmpFile.name)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', tmpFile.name]\n #subprocess.run(cmd)\n \n elif os.path.exists(self.source):\n # If you have a given input file ...\n print('Playing ' + self.source)\n \n # ... then play that one\n if sys.platform == 'win32':\n winsound.PlaySound(str(self.source), winsound.SND_FILENAME)\n elif sys.platform == 'darwin':\n cmd = ['afplay', str(self.source)]\n subprocess.run(cmd)\n else:\n pygame.init()\n pygame.mixer.music.load(self.source)\n pygame.mixer.music.play()\n time.sleep(self.duration)\n \n # If you want to use FFMPEG instead, use the following commands:\n #cmd = [self.ffmpeg_info.ffplay, '-autoexit', '-nodisp', '-i', self.source]\n #subprocess.run(cmd)\n \n except SystemError:\n print('If you don''t have FFMPEG available, you can e.g. use installed audio-files. E.g.:')\n print('import subprocess')\n print('subprocess.run([r\"C:\\Program Files (x86)\\VideoLAN\\VLC\\vlc.exe\", r\"C:\\Music\\14_Streets_of_Philadelphia.mp3\"])')",
"def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)",
"def process_files(audio_files, context=[]):\n\n results = []\n bar_limit = len(audio_files)\n client = speech.SpeechClient()\n with Bar('Processing:', max=bar_limit) as bar:\n for audio in audio_files:\n response = convert_speech_to_text(client, audio, context)\n (transcription, confidence) = transcript(response)\n results.append({\n \"path\": audio,\n \"transcription\": transcription,\n \"confidence\": confidence\n })\n bar.next()\n return results",
"def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch",
"def loadAudio(self,path):\r\n if self.vid:# Release video to access\r\n self.vid.release()\r\n # Check if has audio\r\n mixer.music.unload()\r\n command = \"ffprobe -i \\\"{0}\\\" -show_streams -select_streams a -loglevel error\".format(path)\r\n result = run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n if result.stdout.startswith(\"[STREAM]\"):# Contains audio\r\n self.hasAudio = True\r\n else:\r\n self.hasAudio = False\r\n return\r\n print(\"Preparing Audio...\",end=\"\")\r\n filename = \"project_audio.mp3\"\r\n self.aud_path = filename\r\n t_start = time.time()\r\n # Extract audio using ffmpeg, always overwrite\r\n command = \"ffmpeg -y -i \\\"{0}\\\" \\\"{1}\\\"\".format(path,filename)\r\n result = run(command,stdout=PIPE,stderr=PIPE,universal_newlines=True,shell=True)\r\n## print(result.stderr)\r\n t_end = time.time()\r\n print(\"Done[{0}]\".format(int(t_end-t_start)))\r\n try:\r\n mixer.music.unload()\r\n mixer.music.load(filename)\r\n except:\r\n print(\"Error Loading Audio\")\r\n self.hasAudio = False\r\n self.vid = cv2.VideoCapture(self.vid_path)# Reload video component\r\n # Launch in GUI Thread\r",
"def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")",
"def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features",
"def process_file(self, file_name):\n logger.info(f'Recognising speech for {file_name}')\n wf = wave.open(file_name, \"rb\")\n # Check to see if the audio file can be read by the Vosk model\n if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != \"NONE\":\n raise Exception(f'Invalid file format for {file_name}')\n rec = KaldiRecognizer(self.model, wf.getframerate())\n results = []\n while True:\n data = wf.readframes(config.frame_to_read)\n # If the data we have read is empty then we are at the end of the file\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())\n # Result can contain an empty text string but no result list\n if len(result['text']) > 0:\n # If we reach here we have accepted the translation of a section of text\n results.extend(result['result'])\n result = json.loads(rec.FinalResult())\n # Add to results list\n if len(result['text']) > 0:\n results.extend(result['result'])\n logger.info(f'Processed speech, captured {len(results)} results')\n return results",
"def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n bus.emit(message.reply(ident, data={\"error\": repr(e)}))",
"def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")",
"def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data"
] | [
"0.7045707",
"0.6956398",
"0.6821181",
"0.6742061",
"0.6704527",
"0.6699321",
"0.6644113",
"0.6615665",
"0.64650714",
"0.6453779",
"0.63955903",
"0.63811314",
"0.63749427",
"0.6367572",
"0.63429946",
"0.6340871",
"0.633396",
"0.6314544",
"0.6308602",
"0.6294938",
"0.6285965",
"0.62740135",
"0.62489915",
"0.623837",
"0.6233427",
"0.62217736",
"0.6196891",
"0.618415",
"0.6141494",
"0.6132818"
] | 0.72694874 | 0 |
r""" Return ``n`` independent symbolic matrices in dimension ``d``. | def symbolic_max_plus_matrices(d, n, ch=None, typ='sym'):
d = int(d)
n = int(n)
if d <= 0:
raise ValueError("d (= {}) must be postive".format(d))
nvar = n * d * d
V = FreeModule(ZZ, nvar)
B = ((b,) for b in V.basis())
matrices = []
if d == 1:
typ = 'full'
if typ == 'sym' or typ == 'quick':
z = [0]*nvar
for i in range(n):
z[i*d*d] = 1
diag = (V(z),)
z[i*d*d] = 0
z[i*d*d+1] = 1
nondiag = (V(z),)
z[i*d*d+1] = 0
if typ == 'sym':
matrices.append(SymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch))
else:
matrices.append(QuickSymbolicSymmetricMaxPlusMatrix(d, n, diag, nondiag, ch))
elif typ == 'full':
for i in range(n):
mat = []
for j in range(d):
mat.append([next(B) for k in range(d)])
matrices.append(SymbolicMaxPlusMatrix(d, nvar, mat, ch))
else:
raise ValueError
return matrices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def basis(d, symbolic=True):\n X = sym.symbols('X')\n if d == 0:\n phi_sym = [1]\n else:\n if symbolic:\n h = sym.Rational(1, d) # node spacing\n nodes = [2*i*h - 1 for i in range(d+1)]\n else:\n nodes = np.linspace(-1, 1, d+1)\n \n phi_sym = [Lagrange_polynomials(X, r, nodes) for r in range(d+1)]\n \n # Transform to Python functions\n phi_num = [sym.lambdify([X], phi_sym[r], modules='numpy') for r in range(d+1)]\n return phi_sym if symbolic else phi_num",
"def makeHadamard(n, d):\n return [[1 if d[\"r%dc%d\" % (i, j)] else 0 for j in range(n)] for i in range(n)]",
"def matIxs( n ):\n rows, cols = np.indices( (n,n) )\n row = rows.flatten()\n col = cols.flatten()\n \n return map( lambda x: Vector( x[0], x[1] ), zip( col, row ) )",
"def matrix_chain_dynamic(dimensions, n):\n\n m = [[-1 for _ in range(n)] for _ in range(n)]\n s = [[0 for _ in range(n)] for _ in range(n)]\n\n # multiplying matrix by itself\n for i in range(1, n):\n m[i][i] = 0\n\n for length in range(2, n):\n for i in range(1, n - length + 1):\n j = i + length - 1\n for k in range(i, j):\n cost = m[i][k] + m[k + 1][j] + dimensions[i - 1] * dimensions[k] * dimensions[j]\n if cost > m[i][j]:\n m[i][j] = cost\n # index if splitting\n s[i][j] = k\n return m, s",
"def find_linear_recurrence(self,n,d=None,gfvar=None):\n from sympy.simplify import simplify\n x = [simplify(expand(t)) for t in self[:n]]\n lx = len(x)\n if d is None:\n r = lx//2\n else:\n r = min(d,lx//2)\n coeffs = []\n for l in range(1, r+1):\n l2 = 2*l\n mlist = []\n for k in range(l):\n mlist.append(x[k:k+l])\n m = Matrix(mlist)\n if m.det() != 0:\n y = simplify(m.LUsolve(Matrix(x[l:l2])))\n if lx == l2:\n coeffs = flatten(y[::-1])\n break\n mlist = []\n for k in range(l,lx-l):\n mlist.append(x[k:k+l])\n m = Matrix(mlist)\n if m*y == Matrix(x[l2:]):\n coeffs = flatten(y[::-1])\n break\n if gfvar is None:\n return coeffs\n else:\n l = len(coeffs)\n if l == 0:\n return [], None\n else:\n n, d = x[l-1]*gfvar**(l-1), 1 - coeffs[l-1]*gfvar**l\n for i in range(l-1):\n n += x[i]*gfvar**i\n for j in range(l-i-1):\n n -= coeffs[i]*x[j]*gfvar**(i+j+1)\n d -= coeffs[i]*gfvar**(i+1)\n return coeffs, simplify(factor(n)/factor(d))",
"def symbolic_max_plus_identity(d, nvar, ch=None):\n d = int(d)\n nvar = int(nvar)\n V = FreeModule(ZZ, nvar)\n e = ()\n zero = (V([0]*nvar),)\n\n data = [[zero if i == j else e for j in range(d)] for i in range(d)]\n return SymbolicMaxPlusMatrix(d, nvar, data, ch)",
"def _identity_dense(d, dtype=complex):\n return np.eye(d, dtype=dtype)",
"def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))",
"def _relax_matrix(self, n=1):\n\n for i in range(n):\n self.level.mid.reshape(-1)[:] = self.R_w.dot(self.level.mid.reshape(-1)) \\\n + self.omega * self.level.rhs / self.D",
"def diag_indices(n, ndim=2):\n\n if not use_origin_backend():\n return dpnp_diag_indices(n, ndim)\n\n return call_origin(numpy.diag_indices, n, ndim)",
"def expansion_matrix_d(self):\n row = self._base_nlp._d_map\n nnz = len(self._base_nlp._d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.ng, nnz))",
"def random_planted_matrix(d, n, replace='True'):\n all_idx = np.asarray(list(zip(*np.tril_indices(d,-1))))\n chosen_idx_positions = np.random.choice(len(all_idx), size=n, replace=replace)\n subspaces = all_idx[chosen_idx_positions]\n angles = 2*np.pi * (np.random.rand(len(subspaces)) - 0.5)\n U = np.eye(d)\n for s, alpha in zip(subspaces, angles):\n U = right_givens(math.cos(alpha), math.sin(alpha), U, s[0], s[1])\n return U",
"def construct_M_N(n):\n n2 = n**2\n D0 = 2*np.ones(n2) # 0th diagonal\n D1 = - np.ones(n2 - 1) # -1st, 1st diagonals\n D1[n-1::n] = 0 # Setting every k*n-1 entries = 0 for k < n\n DN = - np.ones(n2 - n) # -nth, nth diagonals\n return (scipy.sparse.diags((D1, D0, D1), (-1, 0, 1), shape=(n2, n2), format=\"csr\"),\n scipy.sparse.diags((DN, D0, DN), (-n, 0, n), shape=(n2, n2), format=\"csr\"))",
"def diag_indices(n, ndim=2):\r\n idx = np.arange(n)\r\n return (idx,) * ndim",
"def dens_matrix(state):\n size = len(state)\n state_conj = np.conj(state)\n dm = np.zeros((size,) * 4, dtype=complex)\n\n for p1 in range(size):\n for p2 in range(size):\n for p1_ in range(size):\n for p2_ in range(size):\n dm[p1, p2, p1_, p2_] = state[p1, p2] * state_conj[p1_, p2_]\n\n return dm",
"def Dinvmatrix(N):\r\n import numpy as np\r\n D = np.zeros((N,N,2))\r\n D[:,:,0] = np.diag((np.append(np.ones((1,int(N/2))),np.zeros((1,int(N/2))))))\r\n D[:,:,1] = np.diag((np.append(np.zeros((1,int(N/2))),np.ones((1,int(N/2))))))\r\n return D",
"def symmetrize(n):\n times = lambda x: jnp.concatenate((jnp.flipud(x), x))\n trans = lambda x: x[n:] + x[n-1::-1]\n return Operator(times=times, trans=trans, shape=(2*n,n))",
"def get_matrixS(n):\n\n mat_nxn = np.zeros([n, n], dtype=int)\n for row_num in range(1, n + 1):\n i = row_num - 1\n if row_num == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == 2:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n elif row_num == n - 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num == n:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 1:\n mat_nxn[i][i + 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n elif row_num % 2 == 0:\n mat_nxn[i][i - 1] = 1\n mat_nxn[i][i + 2] = 1\n mat_nxn[i][i - 2] = 1\n mat_nxn = mat_nxn + np.eye(n, dtype=int)\n mat_2nx2n = np.repeat(np.repeat(mat_nxn, 2, 0), 2, 1)\n return torch.as_tensor(mat_2nx2n)",
"def dirac(self,n):\r\n y = np.zeros(len(n),dtype = complex)\r\n y[n==0] = 1\r\n return y",
"def all_basis_vectors(n: int) -> list:\n assert n >= 0, \"n must be > 0\"\n basis_1dim = ['0', '1']\n\n if n == 0:\n return []\n if n == 1:\n return basis_1dim\n else:\n current_basis = basis_1dim\n for i in range(1, n):\n # can be made more efficient (e.g. by current_basis, current basis until we reach sqrt(n))\n current_basis = outer_subspace_product(basis_1dim, current_basis)\n\n return current_basis",
"def _compute_ind_mat(n, m, nb_coeff):\r\n\r\n ind_mat = np.zeros((nb_coeff, n))\r\n curr_idx = 0\r\n for indexes in itr.combinations_with_replacement(range(m), n):\r\n ind_mat[curr_idx] = np.array(indexes)\r\n curr_idx += 1\r\n\r\n return ind_mat",
"def matrix_N1(l, omega, S, cn):\n sqrt = np.sqrt(l * (l + 1))\n zl = omega * S / cn['l']\n zt = omega * S / cn['t']\n col1 = - np.array((dN1(l, zt), dN2(l, zt), dN3(l, zt), dN4(l, zt))) / zt\n col2 = (sqrt * np.array((dL1(l, zt), dL2(l, zl),\n dL3(l, zl), dL4(l, zt, zl))) / zl\n )\n N = np.array((col1, col2))\n return N.T",
"def novelty(self, d, x):\n # measure the data and check if the dimmension agree\n N = len(x)\n if not len(d) == N:\n raise ValueError('The length of vector d and matrix x must agree.')\n self.n = len(x[0])\n # prepare data\n try:\n x = np.array(x)\n d = np.array(d)\n except:\n raise ValueError('Impossible to convert x or d to a numpy array')\n # create empty arrays\n y = np.zeros(N)\n e = np.zeros(N)\n nd = np.zeros((N,self.n))\n self.w_history = np.zeros((N,self.n))\n # adaptation loop\n for k in range(N):\n self.update_memory_x(x[k])\n m_d, m_x = self.read_memory()\n # estimate\n y[k] = np.dot(self.w, x[k]-m_x) + m_d\n e[k] = d[k] - y[k]\n nu = self.mu / (self.eps + np.dot(x[k]-m_x, x[k]-m_x))\n dw = nu * e[k] * (x[k]-m_x)\n self.w += dw\n self.w_history[k,:] = self.w\n nd[k,:] = dw * e[k]\n self.update_memory_d(d[k])\n return y, e, self.w_history, nd",
"def all_matrices(n):\n complete = int(n * (n-1) / 2)\n least = (n-1)*2 - 1 # the number of edges is at least 2(n-1)-1\n all_possible_list = [i for i in itertools.product([0, 1], repeat=complete)\n if sum(i) >= least]\n all_mats = [create_matrix(i, n) for i in all_possible_list]\n return all_mats",
"def get_dct_matrix(N):\r\n dct_m = np.eye(N)\r\n for k in np.arange(N):\r\n for i in np.arange(N):\r\n w = np.sqrt(2 / N)\r\n if k == 0:\r\n w = np.sqrt(1 / N)\r\n dct_m[k, i] = w * np.cos(np.pi * (i + 1 / 2) * k / N)\r\n idct_m = np.linalg.inv(dct_m)\r\n return dct_m, idct_m",
"def identity_matrix(n):\n data = [[1 if c == r else 0 for c in range(n)] for r in range(n)]\n return Matrix(data)",
"def _dmatrix(kn_u, kn_d):\n d = np.zeros((kn_u.size, 4, 4), np.complex128)\n d_inv = np.zeros_like(d)\n\n d[:, 0, 0] = 1\n d[:, 0, 1] = 1\n d[:, 1, 0] = kn_u\n d[:, 1, 1] = -kn_u\n\n d[:, 2, 2] = 1\n d[:, 2, 3] = 1\n d[:, 3, 2] = kn_d\n d[:, 3, 3] = -kn_d\n\n # an analytic matrix inverse saves time\n inv_kn_u = 0.5 / kn_u\n inv_kn_d = 0.5 / kn_d\n\n d_inv[:, 0, 0] = 0.5\n d_inv[:, 0, 1] = inv_kn_u\n d_inv[:, 1, 0] = 0.5\n d_inv[:, 1, 1] = -inv_kn_u\n\n d_inv[:, 2, 2] = 0.5\n d_inv[:, 2, 3] = inv_kn_d\n d_inv[:, 3, 2] = 0.5\n d_inv[:, 3, 3] = -inv_kn_d\n\n return d, d_inv",
"def decomposition_into_s_n_irreducibles(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n if (p >0 and (p <= self.dimension())):\r\n null = nullspace(A)\r\n w3 = []\r\n for i in range(len(null[0])):\r\n w = []\r\n for j in range(len(null)):\r\n w.append(null[j][i])\r\n w3.append(w) \r\n null = w3\r\n M = np.matrix(w3, dtype= np.float64).transpose()\r\n Mi = np.linalg.pinv(M)\r\n else:\r\n if (p == 0):\r\n M = A\r\n null = []\r\n for i in range(A.shape[0]):\r\n aux = []\r\n for j in range(A.shape[1]):\r\n aux.append(M[i,j])\r\n null.append(aux)\r\n M = np.matrix(null, dtype=np.float64)\r\n Mi = M\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = columnspace(A1)\r\n w4 = []\r\n for i in range(len(col[0])):\r\n w = []\r\n for j in range(len(col)):\r\n w.append(col[j][i])\r\n w4.append(w)\r\n col = w4\r\n M1 = np.matrix(w4, dtype=np.float64).transpose()\r\n Mii = np.linalg.pinv(M1)\r\n for h in w5:\r\n p = k \r\n if (p >0 and (p <= self.dimension())):\r\n if (all(elem == 0 for elem in null[0])):\r\n l1 = 0\r\n else:\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n if (p == 0):\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n l1 = 0\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n hi = self.basis_group_oriented_p_chains(p-1) \r\n on1i = np.ones(len(list(hi.dic.keys())), dtype=np.float64) \r\n vi = P_chains([],[])\r\n vi = P_chains(list(hi.dic.keys()),on1i)\r\n v1i = permutation_in_simplex_test(vi, make_permutation(h))\r\n D1i={}\r\n c1 = 0\r\n for i in list(v1i.dic.keys()):\r\n c2 = 1\r\n for j in list(hi.dic.keys()):\r\n if (i == j):\r\n if (v1i.dic[i] == hi.dic[j]):\r\n D1i[c1] = c2\r\n else:\r\n D1i[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M1.shape[0]\r\n cc = M1.shape[1]\r\n Mai = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Mai[i,:] = (M1[(abs(D1i[i])-1),:]*(np.sign(D1i[i])))\r\n l2 = 0\r\n for j in range(cc):\r\n l2 = np.dot(Mii[j,:],Mai[:,j])[0,0] + l2\r\n else:\r\n l2 = 0\r\n 
uu.append(l1-l2) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])] = abs(round(Ip))\r\n '''Note that I am using round, only because the results obtained are \r\n not esthetics'''\r\n vec_dic[k] = D\r\n return vec_dic",
"def I(n):\n identity = Matrix(n,n)\n print identity.matrix\n index = 0 \n for i in range(identity.nrows):\n for j in range(identity.ncols):\n identity.matrix[i][index] = 1\n index += 1\n\n\n flat = []\n for i in range(identity.nrows):\n for j in range(identity.ncols):\n flat.append(identity.matrix[i][j])\n\n\n return identity",
"def expansion_matrix_dl(self):\n\n row = self._base_nlp._lower_d_map\n nnz = len(self._base_nlp._lower_d_map)\n col = np.arange(nnz, dtype=np.int)\n data = np.ones(nnz)\n return csr_matrix((data, (row, col)), shape=(self.nd, nnz))"
] | [
"0.59795463",
"0.5888374",
"0.57391495",
"0.5711324",
"0.5686454",
"0.5563071",
"0.5492323",
"0.5435326",
"0.5423058",
"0.5419523",
"0.54059994",
"0.5392236",
"0.538575",
"0.5366352",
"0.5356909",
"0.53455615",
"0.5337914",
"0.5334625",
"0.5330213",
"0.53227186",
"0.53154725",
"0.53090394",
"0.5283747",
"0.5281429",
"0.526937",
"0.52573615",
"0.5251509",
"0.52102005",
"0.5202278",
"0.51470196"
] | 0.58973736 | 1 |
r""" Return a string that describes the convex hull engine. | def convex_hull_engine(self):
return self.convex_hull._name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty polyhedron'\n else:\n desc += 'A ' + repr(self.dim()) + '-dimensional polyhedron'\n desc += ' in '\n if self.field()==QQ: desc += 'QQ'\n else: desc += 'RDF'\n desc += '^' + repr(self.ambient_dim()) \n\n if self.n_vertices()>0:\n desc += ' defined as the convex hull of ' \n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n \n if self.n_rays()>0:\n if self.n_lines()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_rays())\n if self.n_rays()==1: desc += ' ray'\n else: desc += ' rays'\n \n if self.n_lines()>0:\n if self.n_rays()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_lines())\n if self.n_lines()==1: desc +=' line'\n else: desc +=' lines'\n\n return desc + \".\\n\";",
"def convex_hull(self):\n return self._geomgen(capi.geom_convex_hull)",
"def convex_hull(self):\n return _property_geo(arctern.ST_ConvexHull, self)",
"def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())",
"def _repr_(self):\n return \"Affine hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n cont = 1\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n print(\"antes \"), print(cont), print(lower)\n lower.pop()\n print(\"despues \"),print(lower)\n cont += 1\n lower.append(p)\n xlower ,ylower = getlists(lower)\n plt.plot(xlower,ylower,color=\"yellow\")\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n print(upper)\n print(\"hello2 \")\n print(cross((2,0),(2,4),(2.5,3)))\n\n xupper ,yupper = getlists(upper)\n plt.plot(xupper,yupper,color=\"blue\")\n\n\n return lower[:-1] + upper[:-1]",
"def _repr_(self):\n s = 'The projection of a polyhedron into ' + repr(self.dimension) \n s += ' dimensions.'\n return s + \"\\n\"",
"def hull(self):\n capacity = self._getAttribute(Attribute.hullCapacity)\n em = self._getAttribute(Attribute.hullEM)\n explosive = self._getAttribute(Attribute.hullExplosive)\n kinetic = self._getAttribute(Attribute.hullKinetic)\n thermal = self._getAttribute(Attribute.hullThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }",
"def convex_hull(self):\n nodes = self._datacontroller.get_data('nodes')\n scale = self._datacontroller.get_data('scale')\n hull = tsputil.convex_hull_helper(nodes)\n if hull:\n result = construct_step(hull, 'Most Top Left Node', 'Clockwise', nodes, scale)\n self._datacontroller.commit_change('path', result)",
"def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))",
"def __str__(self):\n vertices = []\n for idx in range(3):\n v = self.vertices[idx]\n if v is not None:\n vertices.append(str(v))\n else:\n orig_idx, dest_idx = (idx - 1) % 3, (idx + 1) % 3\n orig, dest = self.vertices[orig_idx], self.vertices[dest_idx]\n halfway = (orig.x + dest.x) * .5, (orig.y + dest.y) * .5\n# print(halfway)\n d = orig.distance(dest)\n dx = dest.x - orig.x\n# print(d)\n# print(dx)\n dx /= d\n dy = dest.y - orig.y\n# print(dy)\n dy /= d\n dx *= d\n dy *= d\n pt_halfway = halfway[0] + dy, halfway[1] - dx\n# print(\"outside\", orig_idx, dest_idx, pt_halfway)\n vertices.append(\"{0[0]} {0[1]}\".format(pt_halfway))\n vertices.append(vertices[0])\n return \"POLYGON(({0}))\".format(\", \".join(vertices))",
"def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty lattice polytope'\n else:\n desc += 'A ' + repr(self.affine_dimension()) + '-dimensional lattice polytope'\n desc += ' in ZZ^' + repr(self.space_dimension())\n\n if self.n_vertices()>0:\n desc += ' with '\n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n return desc",
"def give_convex_hull(rand_points):\n return ConvexHull(rand_points)",
"def convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull \n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list. \n return lower[:-1] + upper[:-1]",
"def convex_hull(self):\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._convex_hull",
"def display_and_label_hulls(self, hulls, src):\n \n labels = []\n\n for hull in hulls:\n\n angle = 0\n MA = 1\n ma = 1\n try:\n _,(MA,ma),angle = cv.fitEllipse(hull)\n except:\n pass\n cosAngle = np.abs(np.cos(angle*np.pi/180))\n\n # Only human-classify hulls if it is reasonably a vertically oriented rectangle\n # This is a hueristic to not have to waste time clasifying hulls clearly not poles\n if (cosAngle < 1.75) and (cosAngle > 0.85) and (MA/ma < 0.28):\n cpy = src.copy()\n hull_img = cv.polylines(cpy, [hull], True, (0,0,255), 3)\n cv.imshow(\"Hull\", hull_img)\n keycode = cv.waitKey(0)\n if keycode == 49:\n labels.append((hull, 0))\n print(\"Not a Pole\")\n elif keycode == 50:\n labels.append((hull, 1))\n print(\"A Pole!\")\n else:\n raise Exception(\"Unexpected Key Pressed\")\n else:\n labels.append((hull, 0))\n cv.destroyAllWindows()\n return labels",
"def __str__(self):\n if self.getType() == FRAGMENT:\n t = \"FRAGMENT\"\n else:\n t = \"VERTEX\"\n if not self.isCompiled():\n s = \"not compiled\"\n else:\n s = \"compiled\"\n return \"file: {0}\\ntype: {1}\\nstatus: {2}\\n\".format(self.getPath(), t, s)",
"def __str__(self):\n return \"hl(\" + str(self.point) + \",\" + str(self.angle) + \")\"",
"def convex_hull(points):\n pointList = ExtendedTupleList(points)\n complete_ranges = pointList.range_within(0, 1)\n # Filters for four quadrants\n filters = [\n ((0, complete_ranges[1][\"max\"][2], \">=\"), (1, complete_ranges[0][\"max\"][2], \">=\")), #Q1\n ((0, complete_ranges[1][\"max\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][2], \">=\")), #Q2\n ((0, complete_ranges[1][\"min\"][1], \"<=\"), (1, complete_ranges[0][\"min\"][1], \"<=\")), #Q3\n ((0, complete_ranges[1][\"min\"][2], \">=\"), (1, complete_ranges[0][\"max\"][1], \"<=\")) #Q4\n ]\n # Sorting reversals (True means Desc sort, False means Asc sort. Y sort given first)\n sorts = [\n (True, True),\n (True, False),\n (False, False),\n (False, True),\n ]\n hull = ExtendedTupleList([])\n # In CW order of quadrants...\n for index in [0, 3, 2, 1]:\n # Find all the relevant points\n quad_points = ExtendedTupleList([point for point in pointList.filter(filters[index])])\n # Sort them properly\n quad_points.double_sort(1, 0, reverse_outside=sorts[index][0], reverse_inside=sorts[index][1])\n # Build a convex line segment\n line_segment = convex_line_segment(quad_points, sorts[index][0], sorts[index][1])\n # Reverse it, if we need to\n if index % 2 == 1:\n line_segment.reverse()\n # Add all the points in, avoiding repeated points.\n hull.extend(line_segment, avoid_repeats=True)\n return hull",
"def convex_hull(l):\n\tpass",
"def show_convex_hull(points, input_choice, timing,percent_pts,size,hull_points = None):\n\texists = os.path.isdir('plots')\n\tif not exists: \n\t\tos.mkdir('plots')\n\n\n\tfor each in points:\n\t\tplt.plot(each[0],each[1],'o-')\n\n\tif hull_points is not None:\n\t\thull_pt_list = []\n\t\tfor each in hull_points:\n\t\t\thull_pt_list.append(list(each))\n\n\t\thull_pt_arr = np.asarray(hull_pt_list)\n\t\t# print(hull_pt_arr)\n\t\tplt.plot(hull_pt_arr[:,0],hull_pt_arr[:,1],'k-')\n\t\tfirst_coord = hull_pt_arr[0,:].reshape(1,2)\n\t\tlast_coord = hull_pt_arr[len(hull_pt_arr)-1,:].reshape(1,2)\n\n\t\tlast_coord_arr = np.append(first_coord, last_coord, axis = 0)\n\t\tplt.plot(last_coord_arr[:,0],last_coord_arr[:,1],'k-')\n\t\tplt.title(label = 'For input : '+input_choice+percent_pts+' time taken = '+str(timing)+' s\\n'+'N='+str(size))\n\t\n\tplt.savefig('plots/'+'Graham_Scan_'+str(input_choice)+str(percent_pts)+'_N='+str(size)+'.png')\n\tplt.show()",
"def to_str(self):\n return u\"Superellipse[{:.4g},{:.4g}]\".format(self.alpha0.l, self.alpha0.r)",
"def convex_hull(*args):\n from point import Point\n from line import Segment\n from polygon import Polygon\n\n def uniquify(a):\n # not order preserving\n return list(set(a))\n\n p = args[0]\n if isinstance(p, Point):\n p = uniquify(args)\n\n if len(p) == 1:\n return p[0]\n elif len(p) == 2:\n return Segment(p[0], p[1])\n\n def orientation(p, q, r):\n '''Return positive if p-q-r are clockwise, neg if ccw, zero if\n collinear.'''\n return (q[1] - p[1])*(r[0] - p[0]) - (q[0] - p[0])*(r[1] - p[1])\n\n # scan to find upper and lower convex hulls of a set of 2d points.\n U = []\n L = []\n p.sort()\n for p_i in p:\n while len(U) > 1 and orientation(U[-2], U[-1], p_i) <= 0:\n U.pop()\n while len(L) > 1 and orientation(L[-2], L[-1], p_i) >= 0:\n L.pop()\n U.append(p_i)\n L.append(p_i)\n U.reverse()\n convexHull = tuple(L + U[1:-1])\n\n if len(convexHull) == 2:\n return Segment(convexHull[0], convexHull[1])\n return Polygon(convexHull)",
"def visualHull(sils, length):\n result = sils.pop(0).cone(length)\n assert result.pnFacesInPoly()\n i = 0\n for s in sils:\n # print(i)\n assert result.pnFacesInPoly()\n result = result.intersection(s.cone(length), True)\n # result.plot()\n i += 1\n return result",
"def make_convex_hull(self):\n hull_points_d = []\n try:\n print \"self.V_bar_list_d******************\", self.V_bar_list_d\n hull = ConvexHull(self.V_bar_list_d)\n hull_vertices = hull.vertices\n\n for i in hull_vertices:\n hull_points_d.append(self.V_bar_list_d[i])\n\n except scipy.spatial.qhull.QhullError:\n hull_points_d = self.V_bar_list_d\n\n return hull_points_d",
"def formatted_str(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Generate output string\n output_string = \"\"\n for line in range(9):\n for row in range(9):\n if self.grid[line][row] is None:\n output_string += \".\"\n else:\n output_string += str(self.grid[line][row])\n if row != 8:\n output_string += \" \"\n elif line != 8:\n output_string += \"\\n\"\n if row in [2, 5]:\n output_string += \"| \"\n if line in [2, 5]:\n output_string += \"------+-------+------\\n\"\n return output_string",
"def _convex_hull(points):\n\n # Sort the points lexicographically (tuples are compared lexicographically).\n # Remove duplicates to detect the case we have just one unique point.\n points = sorted(set(points))\n\n # Boring case: no points or a single point, possibly repeated multiple times.\n if len(points) <= 1:\n return points\n\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\n # Returns a positive value, if OAB makes a counter-clockwise turn,\n # negative for clockwise turn, and zero if the points are collinear.\n def cross(o, a, b):\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\n\n # Build lower hull\n lower = []\n for p in points:\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\n lower.pop()\n lower.append(p)\n\n # Build upper hull\n upper = []\n for p in reversed(points):\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\n upper.pop()\n upper.append(p)\n\n # Concatenation of the lower and upper hulls gives the convex hull.\n # Last point of each list is omitted because it is repeated at the beginning of the other list.\n return lower[:-1] + upper[:-1]",
"def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string",
"def draw(self):\n drawing = \"\"\n \n if self.get_gene().get_choice_at(1) is 'c':\n drawing += 'o'\n else:\n drawing += 'x'\n \n if self.is_tft():\n drawing += \"tft\"\n return drawing\n elif self.is_t2t():\n drawing += \"t2t\"\n return drawing\n elif self.is_ftf():\n drawing += \"ftf\"\n return drawing\n\n rule = self.get_gene().get_defect_fraction()\n fraction_display = 0.166\n\n if rule >= 1.0:\n drawing += \"ddd\"\n elif rule > (5*fraction_display):\n drawing += \"ddc\"\n elif rule > (4*fraction_display):\n drawing += \"dcd\"\n elif rule > (3*fraction_display):\n drawing += \"dcc\"\n elif rule > (2*fraction_display):\n drawing += \"cdd\"\n elif rule > (1*fraction_display):\n drawing += \"cdc\"\n elif rule > (0*fraction_display):\n drawing += \"ccd\"\n else:\n drawing += \"ccc\"\n\n return drawing",
"def construct_convex_hull(vertices: Sequence[Point]) -> Polyhedron:\n coords = np.zeros((len(vertices),3))\n for i,vertex in enumerate(vertices):\n coords[i,:] = vertex.coordinates\n hull = qconvex(\"i\", coords)\n n_facets = int(hull[0])\n facets = []\n for facet_vertices_str in hull[1:]:\n facet_vertices_idx = [int(x) for x in facet_vertices_str.split(' ')]\n facet_vertices = [vertices[i] for i in facet_vertices_idx]\n facet = Facet([Contour.from_vertices(facet_vertices)])\n facets.append(facet)\n polyhedron = Polyhedron(facets)\n return polyhedron"
] | [
"0.66425604",
"0.63532376",
"0.6281747",
"0.60994506",
"0.60871357",
"0.5944821",
"0.5937618",
"0.5842721",
"0.5841963",
"0.5745582",
"0.57072836",
"0.5683671",
"0.5624012",
"0.5577006",
"0.55180144",
"0.54937017",
"0.5490255",
"0.5487594",
"0.54810053",
"0.5431776",
"0.5420677",
"0.5419913",
"0.54195774",
"0.54107594",
"0.53963655",
"0.5381174",
"0.5371979",
"0.5314691",
"0.530822",
"0.52644336"
] | 0.75023854 | 0 |
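A quick worked check of the monotone-chain convex_hull that appears in the snippets above (this assumes that definition is in scope; the point set is made up for this illustration):

# Interior points are dropped; the hull comes back in counter-clockwise order,
# starting from the lexicographically smallest vertex.
points = [(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)]
assert convex_hull(points) == [(0, 0), (2, 0), (2, 2), (0, 2)]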
r""" Return the list of equal coefficients between self and other. | def equal_coefficients(self, other):
d = self._d
return [(i,j) for i in range(d) for j in range(d) \
if self[i][j] == other[i][j]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __xor__(self, other):\n\n sym_diff = [value for value in self if value not in other]\n sym_diff.extend([value for value in other if value not in self])\n\n return sym_diff",
"def GetEqualConstrains(self):\n return _gmat_py.Spacecraft_GetEqualConstrains(self)",
"def coefficients(self) :\n raise NotImplementedError",
"def coefficients(self) :\n return self.__coefficients",
"def __pow__(self, other):\n n = len(self)\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i]**other\n\n return v",
"def coefficients(self):\n return self._coefficients",
"def coefficients(self):\n return self._coefficients",
"def __sub__(self, other: 'ModelParameters') -> 'ModelParameters':\n return ModelParameters([self[idx] - other[idx] for idx in range(len(self))])",
"def coefficients(self) -> np.ndarray:\n return self._coefficients",
"def get_coefficients(self):\n return self.coefficients",
"def get_coefficients(self):\n return self.coefficients",
"def __xor__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x ^ y for x, y in zip(a, b)])",
"def getEquates(self) -> Iterator[ghidra.program.model.symbol.Equate]:\n ...",
"def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self, other)]\n return self",
"def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def pd(self, other):\n return Matriz([self]).T() * Matriz([other])",
"def find_coefficients(self):\n self.make_matrix()\n self.coeffs = np.linalg.solve(self.global_matrix,self.global_vector)\n self.coeffs = np.append(self.coeffs, self.D) #Initial condition",
"def __eq__(self, other):\n if self.coeff != other.coeff:\n return False\n \n if self.GetKeggID() != other.GetKeggID():\n return False\n \n if self.phase.Name() != other.phase.Name():\n return False\n \n return True",
"def test_coefficient_orders(self):\n for i in range(2, 5):\n spec = {2*j: 0 for j in range(i)}\n bcs_ref = BoundaryConditions(spec, 2*i-2)\n bcs_main = BoundaryConditions(spec, 2*i)\n\n coeffs_ref = get_ext_coeffs(bcs_ref)[i-1]\n coeffs_main = get_ext_coeffs(bcs_main)[i-1]\n\n assert coeffs_ref == coeffs_main",
"def enthalpy_equality_func(self):\n residual = []\n for i in range(self.num_i):\n residual += [self.inl[i].h.val_SI - self.outl[i].h.val_SI]\n return residual",
"def coefficients(self, force_characters = False) :\n raise NotImplementedError",
"def __xor__(self, other):\r\n return self + other - 2 * self * other",
"def IsEqualOrder(self,other):\n return self.InferPolynomialDegree() == other.InferPolynomialDegree()",
"def coefficients(self):\r\n return self.coef_['x']",
"def __isub__(self, other):\n self.components = [c1 - c2 for (c1, c2) in zip(self.components, other.components)]\n return self",
"def __sub__(self, other):\n # \n # TODO - your code here\n #\n result = [];\n for i in range(self.h):\n result.append([a-b for a,b in zip(self.g[i],other.g[i])]);\n \n return Matrix(result);",
"def __and__(self, other):\n\n return [value for value in self if value in other]",
"def commutator(self, other) -> 'MultiVector':\n\n return ((self * other) - (other * self)) / 2",
"def commutes_with(self, other):\n if not isinstance(other, type(self)):\n raise TypeError(\n 'Can only test commutation with another MajoranaOperator.')\n\n if len(self.terms) == 1 and len(other.terms) == 1:\n return _majorana_terms_commute(\n list(self.terms.keys())[0],\n list(other.terms.keys())[0])\n return self * other == other * self",
"def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])"
] | [
"0.66310555",
"0.6198817",
"0.61889756",
"0.5865301",
"0.5818835",
"0.5683608",
"0.5683608",
"0.56740135",
"0.5648324",
"0.56273437",
"0.56273437",
"0.5593266",
"0.55242145",
"0.5514569",
"0.546532",
"0.5455155",
"0.5414487",
"0.5409538",
"0.5363544",
"0.53480685",
"0.5334692",
"0.53232247",
"0.53002703",
"0.5292918",
"0.52882516",
"0.52718186",
"0.5233594",
"0.5221504",
"0.5219258",
"0.5215172"
] | 0.78071773 | 0 |
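A minimal standalone sketch of the comparison that equal_coefficients above performs, written for plain d x d nested lists (the helper name and the example matrices are hypothetical, not part of the original class):

def equal_coefficient_positions(a, b):
    # Return the (i, j) pairs at which two square matrices agree.
    d = len(a)
    return [(i, j) for i in range(d) for j in range(d) if a[i][j] == b[i][j]]

# The two matrices below agree on the diagonal only.
assert equal_coefficient_positions([[1, 2], [3, 4]], [[1, 9], [8, 4]]) == [(0, 0), (1, 1)]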
r""" Evaluates this symbolic matrix at the integer point ``p``. | def eval(self, p):
from max_plus.max_plus_int import minus_infinity, IntegerMaxPlusMatrix
F = FreeModule(ZZ, self._nvars)
p = F(p)
mat = []
d = self.dim()
for i in range(d):
row = []
for j in range(d):
pts = self[i,j]
row.append(minus_infinity() if not pts else max(p.dot_product(v) for v in pts))
mat.append(row)
return IntegerMaxPlusMatrix(self._d, self._d, mat) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self,p):\n if not self.initialized: self.__initialize__()\n if self.vp0: p_ = 1-p\n else: p_ = p\n if self.ids_to_consider is None:\n #sum on all parametrized cell\n cf = np.sum(self.V[self.p_ids-1]*p_)/self.V_tot - self.max_v_frac\n else:\n cf = np.sum((self.V[self.ids_to_consider-1]*p_))/self.V_tot - self.max_v_frac\n return cf",
"def eval_poly(self, p):\n A = self\n m, n = A.shape\n\n if m != n:\n raise DMNonSquareMatrixError(\"Matrix must be square\")\n\n if not p:\n return self.zeros(self.shape, self.domain)\n elif len(p) == 1:\n return p[0] * self.eye(self.shape, self.domain)\n\n # Evaluate p(A) using Horner's method:\n # XXX: Use Paterson-Stockmeyer method?\n I = A.eye(A.shape, A.domain)\n p_A = p[0] * I\n for pi in p[1:]:\n p_A = A*p_A + pi*I\n\n return p_A",
"def matrix_simmetric_representate(self, p):\r\n if (p >0 and (p <= self.dimension()) ):\r\n v = self.basis_group_oriented_p_chains(p)\r\n p = p - 1\r\n ve = self.basis_group_oriented_p_chains(p)\r\n M = csr_matrix((len(ve.dic), len(v.dic)), dtype=np.int8).toarray()\r\n j = 0\r\n for u1 in list(v.dic.keys()):\r\n d = P_chains([u1],[v.dic[u1]])\r\n l = boundary_op_n(d).dic\r\n for u2 in list(l.keys()):\r\n i = 0\r\n for w in list(ve.dic.keys()):\r\n if (w == u2):\r\n M[i,j] = int((l)[u2])\r\n i = i + 1\r\n j = j + 1\r\n return M \r\n else:\r\n if (p == 0):\r\n return np.identity(len(list(self.basis_group_oriented_p_chains(0).dic.keys())))\r\n else:\r\n return False",
"def update_p(self, p: float):\n self.p = p\n for k, sequential in self.m_ops.items():\n if sequential[0].is_identity_op():\n sequential[-1].p = p",
"def EvaluateLocation(self, p_int, , p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...",
"def _eval_coeff(self, pt):\n val = 1\n for a in self.args:\n val *= a.coeff(pt)\n return val",
"def regular(P):\n try:\n cols = P.shape[0]\n ans = np.ones((1, cols))\n # eq = np.matmul(ans, P)\n # s = np.array(np.arange(1, cols + 1))\n eq = np.vstack([P.T - np.identity(cols), ans])\n # va, vec = np.linalg .eig(P)\n results = np.zeros((cols, 1))\n results = np.vstack([results, np.array([1])])\n statetionary = np.linalg.solve(eq.T.dot(eq), eq.T.dot(results)).T\n # print(statetionary)\n # print(np.argwhere(statetionary < 0))\n if len(np.argwhere(statetionary < 0)) > 0:\n return None\n return statetionary\n except Exception as e:\n return None",
"def evalComponent(self, x, p):\n if p > 0 and p <= self.n:\n p = str(p)\n y = self[\"off\"] + self[\"lin\"] * x\n self._v1d.assignValues(\n {\"A\": self[\"A\" + p], \"al\": self[\"al\" + p], \"ad\": self[\"ad\" + p], \"mu\": self[\"mu\" + p]})\n y += self._v1d.evaluate(x)\n return y\n else:\n raise(PE.PyAValError(\"No such component (no. \" + str(p) + \")\", where=\"MultiVoigt1d::evalComponent\",\n solution=\"Use value between 1 and \" + str(self.n)))",
"def resolves_matrix(self):\n self.P = np.linalg.solve(self.M, self.f)",
"def cost_function(H, n_qubits, p, params):\n ini_state=plus_state(n_qubits)\n for i in range(p):\n ini_state=qaoa_step(ini_state,H,n_qubits,params=[params[2*i],params[2*i+1]])\n return ((sparse.spmatrix.getH(ini_state)).dot(H.dot(ini_state))).real, ini_state",
"def ap(self, P):\n if P.divides(self.conductor()):\n if (P*P).divides(self.conductor()):\n # It is 0, because the reduction is additive.\n return ZZ(0)\n else:\n # TODO: It is +1 or -1, but I do not yet know how to\n # compute which without using the L-function.\n return '?'\n else:\n return self._S.hecke_matrix(P)[0,0]",
"def evaluate_rijP(self, q):\n rP_i = self._parent._parent.bodies[self.body_id_i].evaluate_r(q, element_id=self.element_id, ksi=self.element_ksi)\n\n self.r_P_list[0] = rP_i\n\n # distance vector\n r_ij_P = rP_i - self.rP_j\n\n return r_ij_P",
"def evaluate(self) -> int:",
"def power(self,p):\r\n\t\t\r\n\t\t# raise to power\r\n\t\tr,o = Li._expand(self,p)\r\n\t\t\r\n\t\treturn Li(r)",
"def __evaluate(self, point):\n assert len(point) == len(self.weight)-1\n result = self.weight[0]\n for i in range(0,len(point)):\n result += self.weight[i+1] * point[i]\n return result",
"def _eval_coeff(self, pt):\n return sum(a.coeff(pt) for a in self.args)",
"def _compute_pTable(self, expand=False, factor=False,\n simplify=False):\n if self._has(\"p\"):\n return\n if not self._has(\"k\"):\n self.kTable(expand=expand, factor=factor, simplify=simplify)\n if not self._has(\"m\"):\n self.multiplicities(expand=expand, factor=factor,\n simplify=simplify)\n p = Array3D(self._.d + 1)\n self._compute_parameters(p, self._.P, self._.m, integral=True,\n name=PARAMETER, sym=SYMBOL)\n self._.p = p\n self.check_handshake()",
"def __imul__(self, s):\n val = _hypre.HypreParMatrix___imul__(self, s)\n\n # val.thisown = 0\n return self\n\n\n return val",
"def _precompute_xl(self, p: int) -> List[int]:\n res = [1]\n val = 1\n for _ in range(len(self._s)):\n val = (val * self.X) % p\n res.append(val)\n return res",
"def regular(P):\n try:\n dim = P.shape[0]\n q = (P - np.eye(dim))\n ones = np.ones(dim)\n q = np.c_[q, ones]\n QTQ = np.dot(q, q.T)\n bQT = np.ones(dim)\n answer = np.linalg.solve(QTQ, bQT)\n if np.all(answer > 0):\n return answer\n else:\n return None\n except Exception as e:\n return None",
"def evaluate(self, point):\n result = self.__evaluate(point)\n return -1 if result < 0 else 1",
"def _compute_parameters(self, p, P, m, integral=False, name=None,\n sym=None):\n for h in range(self._.d + 1):\n for i in range(self._.d + 1):\n for j in range(self._.d + 1):\n p[h, i, j] = full_simplify(\n sum(m[t] * P[t, h] * P[t, i] * P[t, j]\n for t in range(self._.d + 1))\n / (self._.n * P[0, h]))\n self._check_parameter(h, i, j, p[h, i, j],\n integral=integral,\n name=name, sym=sym)\n self._check_consistency(p, P[0], name=name, sym=sym)",
"def EvaluatePosition(self, , p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...",
"def solve_ilp(self):\n\n ilp_solution = pylp.Solution()\n self.ilp_solver.set_constraints(self.constraints)\n message = self.ilp_solver.solve(ilp_solution)\n print(\"ILP solved with minimal value \" + str(ilp_solution.get_value()) + \" and status \" + message)\n\n solution = lil_matrix(self.graph.shape)\n for i in range(self.num_variables):\n print(\"value of var \" + str(i) + \" is \" + str(ilp_solution.get_vector()[i]))\n if ilp_solution.get_vector()[i] < 0.5:\n continue\n (u,v) = self.var_to_edge[i]\n solution[u,v] = self.graph[u,v] - self.min_cost + 1\n\n return solution",
"def Eval(self, r, ppar, pperp, v, gamma=None, p2=None, p=None, xi=None):\n while False:\n yield None",
"def a_ij(s, p, i=1, j=1): # (Validated)\n from math import sqrt\n if i == j:\n return s.c[i]['a'] # Return pure paramater\n else: # find mixture aij i =/= j\n return (1 - p.m['k'][i][j]) * sqrt(s.c[i]['a'] * s.c[j]['a'])",
"def I(x, y, l, p):\n \n return 0.5 / (mu * c) * A0**2 * ( u(x, y, l, p) )**2",
"def success_p(self, input_p = None):\r\n if input_p is None:\r\n input_p = uniform_p(self.n_inputs())\r\n return np.trace(np.dot(np.diag(input_p), self.matrix))",
"def compute_demand(self, p):\n \n G, h = spdiag([-1.0]*self.n), matrix(0.0, (self.n, 1))\n \n if self.type == 'quad':\n Q, r = self.data\n return solvers.qp(-Q, p-r, G, h)['x']\n\n if self.type == 'sqrt':\n def F(x=None, z=None):\n if x is None: return 0, matrix(1.0, (self.n, 1))\n u, Du, H = self.utility(x)\n f, Df = p.T*x - u, p.T - Du\n if z is None: return f, Df\n return f, Df, -z[0]*H\n return solvers.cp(F, G, h)['x']",
"def mod_inv(a,p):\r\n\r\n for i in range(1,p):\r\n if (i*a)%p==1: return i\r\n raise ValueError(str(a)+\" has no inverse mod \"+str(p))"
] | [
"0.6590414",
"0.64902985",
"0.5803352",
"0.576456",
"0.56841123",
"0.56735307",
"0.5545928",
"0.54923505",
"0.54681116",
"0.5413606",
"0.5399578",
"0.5339681",
"0.5316676",
"0.53084254",
"0.5288867",
"0.525981",
"0.52447516",
"0.5244498",
"0.5217221",
"0.521235",
"0.5206362",
"0.5195178",
"0.51789606",
"0.51632947",
"0.5100213",
"0.5087714",
"0.5081258",
"0.5078931",
"0.5053198",
"0.5036393"
] | 0.6615458 | 0 |
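The evaluation rule used by the eval method above can be sketched without Sage: each matrix entry is a collection of integer exponent vectors, its value at p is the maximum dot product with p, and an empty entry evaluates to minus infinity (represented here by float('-inf'), a stand-in for the minus_infinity of the max-plus code above):

def eval_entry(points, p):
    # Max-plus evaluation of one matrix entry at the integer point p.
    if not points:
        return float('-inf')
    return max(sum(vi * pi for vi, pi in zip(v, p)) for v in points)

# Entry {(1, 0), (0, 2)} at p = (3, 1): max(1*3 + 0*1, 0*3 + 2*1) = max(3, 2) = 3.
assert eval_entry([(1, 0), (0, 2)], (3, 1)) == 3
assert eval_entry([], (3, 1)) == float('-inf')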
r""" Perform a cyclic swap on the vertices. This is used in multiplication of symbolic upper matrices. Currently it is suboptimal but on the other hand, this cost much less than whatever convex hull computation. | def vertex_cyclic_swap(nvars, l, i):
if i == 0 or not l:
return l
ll = []
F = l[0].parent()
for v in l:
assert not v[-i:]
ll.append(F(tuple(v[-i:]) + tuple(v[:-i])))
for v in ll: v.set_immutable()
return tuple(ll) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vertex_swap(d, n, l, i1, i2, j1, j2):\n if i1 == i2 and j1 == j2:\n return l\n if i1 == j1:\n # (i1,i1) -> (i2,i2)\n assert i2 == j2\n def swap(v):\n swap2(d, n, v, i1, i2)\n elif i1 == i2:\n # (i,j1) -> (i,j2)\n def swap(v):\n swap2(d, n, v, j1, j2)\n elif j1 == j2:\n # (i1,j) -> (i2,j)\n def swap(v):\n swap2(d, n, v, i1, i2)\n elif i1 == j2 and i2 == j1:\n # (i1,j1) -> (j1,i1)\n def swap(v):\n swap2(d, n, v, i1, j1)\n elif i1 == j2:\n # (i1,j1) -> (i2,i1)\n def swap(v):\n swap3(d, n, v, j1, i1, i2)\n elif i2 == j1:\n # (i1,j1) -> (j1,j2)\n def swap(v):\n swap3(d, n, v, i1, j1, j2)\n else:\n # (i1,j1) -> (i2,j2)\n def swap(v):\n swap2(d, n, v, i1, i2)\n swap2(d, n, v, j1, j2)\n ll = []\n for v in l:\n v = v.__copy__()\n swap(v)\n v.set_immutable()\n ll.append(v)\n ll.sort()\n return tuple(ll)",
"def swap_vertices(self, i, j):\r\n store_vertex_i = self.vertices[i]\r\n store_vertex_j = self.vertices[j]\r\n self.vertices[j] = store_vertex_i\r\n self.vertices[i] = store_vertex_j\r\n for k in range(len(self.vertices)):\r\n for swap_list in [self.vertices[k].children, self.vertices[k].parents]:\r\n if i in swap_list:\r\n swap_list[swap_list.index(i)] = -1\r\n if j in swap_list:\r\n swap_list[swap_list.index(j)] = i\r\n if -1 in swap_list:\r\n swap_list[swap_list.index(-1)] = j",
"def apply_cycle(points, cycle):\n j = cycle[0]\n for i in cycle:\n points[i], points[j] = points[j], points[i] # swap points i and j",
"def SwapSides(self):\n for c in self.reactants:\n c.coeff = -c.coeff",
"def swap(ix, jx, ax, ay):\n tempx, tempy = ax[ix], ay[ix]\n ax[ix] = ax[jx]\n ay[ix] = ay[jx]\n ax[jx] = tempx\n ay[jx] = tempy",
"def test_swap(self, dim):\r\n graph = nx.complete_graph(dim)\r\n graph.remove_edge(0, dim - 1)\r\n s = list(range(dim - 1))\r\n assert set(clique.swap(s, graph)) == set(range(1, dim))",
"def swap(self, *args):\n return _osgAnimation.VertexList_swap(self, *args)",
"def _swap1(self, cids, iids):\n # The coupling indexes of the two legs to swap\n c1, c2 = cids\n # The index of the two legs to swap within the given coupling\n i1, i2 = iids\n assert c1 != c2\n\n # Get the connecting leg between the two couplings\n legs = self.get_legs()\n intersect = set(legs[c1]).intersection(set(legs[c2]))\n\n assert len(intersect) == 1 # Only one internal leg between couplings\n ileg = intersect.pop()\n # index of the internal leg in c1 and c2\n ii = [legs[cid].index(ileg) for cid in cids]\n\n assert ii[0] != i1 and ii[1] != i2\n # Check that the flow is consistent along the internal bond\n assert self.coupling[c1][ii[0]][1] is not self.coupling[c2][ii[1]][1]\n\n # Order such that first bond is in the one with out\n if self.coupling[c1][ii[0]][1]:\n c1, c2, i1, i2, ii = c2, c1, i2, i1, (ii[1], ii[0])\n assert not self.coupling[c1][ii[0]][1] and self.coupling[c2][ii[1]][1]\n\n def permute_key(key):\n copy = list(list(k) for k in key)\n copy[c1][i1], copy[c2][i2] = copy[c2][i2], copy[c1][i1]\n return copy\n self._coupling = tuple(tuple(c) for c in permute_key(self.coupling))\n f1, f2 = ([x[1] for x in self.coupling[c]] for c in (c1, c2))\n\n def mappingf(okey):\n nk = permute_key(okey)\n # All good interal symmetry sectors in for the swapped 1st coupling\n for k in sls.allowed_couplings(nk[c1], f1, ii[0], self.symmetries):\n # Assign the key of the internal leg\n nk[c1][ii[0]], nk[c2][ii[1]] = k, k\n if sls.is_allowed_coupling(nk[c2], f2, self.symmetries):\n yield tuple(tuple(e) for e in nk)\n\n prefdict = sls._prefswap1((i1, i2), ii)\n\n def prefactorf(okey, nkey):\n return np.prod([prefdict.get(ss, lambda x, y: 1.)(\n [el[i] for j in (c1, c2) for el in okey[j]],\n [el[i] for j in (c1, c2) for el in nkey[j]]\n ) for i, ss in enumerate(self.symmetries)])\n\n self._manipulate_coupling(mappingf, prefactorf)\n return self",
"def swap(self, adjacent_transposition):\n\n result = Tensor()\n for key_self in self.keys():\n # ensure that the swap can be made with the available slots\n if max(adjacent_transposition) < len(key_self):\n prefix = Tensor({Tensor._merge_keys(*key_self[0 : min(adjacent_transposition)]): self[key_self]})\n root = type(self)._clifford_swap(\n *key_self[min(adjacent_transposition) : max(adjacent_transposition) + 1]\n )\n postfix = Tensor({Tensor._merge_keys(*key_self[max(adjacent_transposition) + 1 :]): 1})\n result = result + prefix * root * postfix\n else:\n result = result + Tensor({key_self: self[key_self]})\n self.clear()\n self.update(result)\n return self",
"def test_flip_loop():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (4,0), (6,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (6,5), (4,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)",
"def test_flip_loop2():\n conv = ToPointsAndSegments()\n ring = [ (0,0), (3,0), (3.8,2), (5,0), (6.3, 2), (7,0), (10,0), (13,4), (10,5), (7,5), (6.5, 3), (5,5), (3.5,3), (3,5), (0,5), (-2,2), (0,0)]\n conv.add_polygon([ring])\n skel = calc_skel(conv, pause=True, output=True)",
"def _swap2(self, cids, iids):\n # The coupling indexes of the two legs to swap\n c1, c2 = cids\n # The index of the two legs to swap within the given coupling\n i1, i2 = iids\n assert c1 != c2\n\n # Get the connecting coupling between the two couplings\n cnx = self.get_couplingnetwork().to_undirected(as_view=True)\n ci = set(nx.common_neighbors(cnx, *[self.coupling[ii] for ii in cids]))\n if len(ci) != 1:\n raise ValueError(f'cids: {cids} have {len(ci)} common neighbors')\n ci = self.coupling.index(ci.pop())\n\n # internal legs\n l1 = cnx.edges[self.coupling[c1], self.coupling[ci], 0]['leg']\n l2 = cnx.edges[self.coupling[c2], self.coupling[ci], 0]['leg']\n\n # index of the internal leg in c1 and c2\n legs = self.get_legs()\n il1, il2 = [[legs[x].index(ll) for x in (y, ci)]\n for y, ll in zip(cids, (l1, l2))]\n\n assert il1[0] != i1 and il2[0] != i2\n assert il1[1] != il2[1]\n # Check that the flow is consistent along the internal bond\n assert self.coupling[c1][il1[0]][1] is not self.coupling[ci][il1[1]][1]\n assert self.coupling[c2][il2[0]][1] is not self.coupling[ci][il2[1]][1]\n\n def permute_key(key):\n copy = list(list(k) for k in key)\n copy[c1][i1], copy[c2][i2] = copy[c2][i2], copy[c1][i1]\n return copy\n f1, f2, fi = ([x[1] for x in self.coupling[c]] for c in (c1, c2, ci))\n self._coupling = tuple(tuple(c) for c in permute_key(self.coupling))\n\n # All good interal symmetry sectors in for the swapped 1st coupling\n nkeys = set(tuple(tuple(e) for e in permute_key(k)) for k in self)\n c1set = {}\n r11, r12 = set(range(3)).difference([il1[0]])\n for k in set(key[c1] for key in nkeys):\n kn = (k[r11], k[r12])\n if kn not in c1set:\n c1set[kn] = set(\n sls.allowed_couplings(k, f1, il1[0], self.symmetries))\n c2set = {}\n r21, r22 = set(range(3)).difference([il2[0]])\n for k in set(key[c2] for key in nkeys):\n kn = (k[r21], k[r22])\n if kn not in c2set:\n c2set[kn] = set(\n sls.allowed_couplings(k, f2, il2[0], self.symmetries))\n\n vac = sls.vacuumIrrep(self.symmetries)\n Z1 = set().union(*c1set.values())\n Z2 = set().union(*c2set.values())\n rf = set(range(3)).difference([il1[1], il2[1]]).pop()\n fit = [fi[rf], fi[il1[1]], fi[il2[1]]]\n oks = {(k1, k2): set(sls.allowed_couplings((vac, k1, k2),\n fit, 0, self.symmetries))\n for k1, k2 in itertools.product(Z1, Z2)}\n\n def mappingf(okey):\n nk = permute_key(okey)\n set1 = c1set[(nk[c1][r11], nk[c1][r12])]\n set2 = c2set[(nk[c2][r21], nk[c2][r22])]\n for kk1 in set1:\n for kk2 in set2:\n if nk[ci][rf] not in oks[(kk1, kk2)]:\n continue\n\n # Assign the key of the internal leg\n nk[c1][il1[0]], nk[ci][il1[1]] = kk1, kk1\n nk[c2][il2[0]], nk[ci][il2[1]] = kk2, kk2\n yield tuple(tuple(e) for e in nk)\n\n prefdict = sls._prefswap2(iids, il1, il2, f1, f2, fi)\n\n def prefactorf(okey, nkey):\n flokey = [list(x) for x in\n zip(*[el for j in (c1, c2, ci) for el in okey[j]])]\n flnkey = [list(x) for x in\n zip(*[el for j in (c1, c2, ci) for el in nkey[j]])]\n return np.prod([prefdict.get(ss, lambda x, y: 1.)(o, n) for\n o, n, ss in zip(flokey, flnkey, self.symmetries)])\n\n self._manipulate_coupling(mappingf, prefactorf)\n return self",
"def contiguousFlip(currPath, i, j):\n if i != j and (i+1)%len(currPath)!=j and (j+1)%len(currPath)!=i:\n iP = i\n jP = j\n if (i < j):\n maxx=(j-i+1)//2\n else:\n maxx=(j+1+len(currPath)-i)//2\n for _ in range(maxx):\n temp = currPath[iP]\n currPath[iP] = currPath[jP]\n currPath[jP] = temp\n iP = (iP + 1)%len(currPath)\n jP = (jP - 1)%len(currPath)",
"def swap(self, *args):\n return _osgAnimation.mapVertexInfluence_swap(self, *args)",
"def test_swap_degree(self, dim):\r\n graph = nx.lollipop_graph(dim, 1)\r\n graph.remove_edge(0, dim - 1)\r\n graph.remove_edge(0, dim - 2)\r\n s = list(range(dim - 2))\r\n result = set(clique.swap(s, graph, node_select=\"degree\"))\r\n expected = set(range(1, dim - 2)) | {dim - 1}\r\n assert result == expected",
"def swap2(d, n, v, i, j):\n for a in range(n):\n for k in range(d):\n if k == i or k == j:\n continue\n x = a*d*d + d*i + k\n y = a*d*d + d*j + k\n v[x], v[y] = v[y], v[x]\n\n x = a*d*d + d*k + i\n y = a*d*d + d*k + j\n v[x], v[y] = v[y], v[x]\n\n x = a*d*d + d*i + i\n y = a*d*d + d*j + j\n v[x], v[y] = v[y], v[x]\n\n x = a*d*d + d*j + i\n y = a*d*d + d*i + j\n v[x], v[y] = v[y], v[x]",
"def _cswap(i, j, S):\n N = _rswap(i, j, S.transpose()).transpose()\n return N",
"def swap(C):\n \n return [c.swap() for c in C]",
"def invert(self):\n self.vertices.reverse()",
"def swap(self):\n return _coconut_tail_call(Eq, self.b, self.a)",
"def reduce_sequential(edges, start, end):\n dd = get_degrees_dictionary(edges) # O(len(edges))\n tvs = get_transition_vertexes(dd, start, end) # O(len(dd))\n logger.debug(\"dd: {}\".format(dd))\n logger.debug(\"tvs: {}\".format(tvs))\n\n for v in tvs: # for each vertex in transitional vertexes\n # edges\n ei1 = tvs[v][0]\n ei2 = tvs[v][1]\n\n e1 = edges[ei1] # e1 is going to save resulted edge\n e2 = edges[ei2] # e2 is going to become cycled and then removed\n\n # vertexes\n # v - vertex to be removed\n # v1 - vertex, connected to v by e1 edge (unchanged)\n # v2 - vertex, connected to v by e2 edge\n # will be moved to e1 substituting v there\n # edges list in transitional vertex dictionary will be updated\n\n logger.debug(\"Substituted {}: {}:{}, {}:{} -> \".format(\n v, ei1, e1, ei2, e2))\n\n # v is going to be substituted in e1 by value of \"not v\" vertex in e2\n substitute_index_in_ei2 = 1 - e2.index(v) # if vi=0 s=1; v=1 s=0\n\n # replace v in ei1 by substitute from ei2\n v2 = e2[substitute_index_in_ei2]\n\n e1[e1.index(v)] = v2\n e2[substitute_index_in_ei2] = v\n\n # here we will have 2 edges\n # edges[ei1] -> ['v1', 'v2', ?] #\n # edges[ei2] -> ['v', 'v', 5] # delay not changed\n\n # updated edges for substituted vertex in tvs dict to point to\n # ei1 edge instead of ei2\n # e.g. 'v2' was connected by ei2, now is connected by ei1\n\n if v2 != start and v2 != end:\n # v2 is not present in tvi and shouldn't be updated\n v2ei = tvs[v2] # list of edges indexes for v2\n vei = tvs[v] # list of edges indexes for v\n v2ei[v2ei.index(ei2)] = ei1\n\n logger.debug(\"tvs[{}][2] = t[1] : {} = {}\".format(\n v2,\n tvs[v2][2],\n t[1]))\n\n # update weight\n new_weight = e1[2] + e2[2]\n e1[2] = new_weight\n\n # normalize result edge\n redirect_edge_alpabetically(e1)\n\n # here we will have 2 edges\n # edges[ei1] -> ['v1', 'v2', 8] #\n # edges[ei2] -> ['v', 'v', 5] # delay not changed\n\n # only thing left is to remove the ei2 edge, this will be done later\n # not to break iteration over edges\n\n logger.debug(\"{}:{}, {}:{}\".format(ei1, e1, ei2, e2))\n\n # get indexes of edges to be removed\n indexes = [i for i in reversed(sorted([tvs[v][1] for v in tvs]))]\n logger.debug(\"Edges index removed after sequential update: {}\".format(\n indexes))\n\n for i in indexes:\n edges.pop(i)\n\n return len(tvs) # amount of edges removed",
"def _swap(self, i, j, k):\n\t\tif self.verbose:\n\t\t\tprint(i, k)\n\t\t\tprint(i, j)\n\t\t\tprint(j, k)\n\t\tself.arrangement[i],self.arrangement[k] = self.arrangement[k],self.arrangement[i]\n\t\tself.arrangement[i],self.arrangement[j] = self.arrangement[j],self.arrangement[i]\n\t\tself.arrangement[j],self.arrangement[k] = self.arrangement[k],self.arrangement[j]",
"def swap(self):\n if self.cnt_swap == 0:\n i = self.swaplist[self.cnt_swap][0]\n j = self.swaplist[self.cnt_swap][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n elif self.cnt_swap < self.nb_swaps:\n i = self.swaplist[self.cnt_swap - 1][0]\n j = self.swaplist[self.cnt_swap - 1][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n i = self.swaplist[self.cnt_swap][0]\n j = self.swaplist[self.cnt_swap][1]\n self.b[i], self.b[j] = self.b[j], self.b[i]\n self.f[i], self.f[j] = self.f[j], self.f[i]\n else:\n return 0\n self.cnt_swap += 1\n return 1",
"def _clifford_swap(cls, slot_i, slot_j) -> Tensor:\n\n return Tensor(\n {\n Tensor._merge_keys((slot_j,), (slot_i,)): -1,\n Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j),\n }\n )",
"def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()",
"def switch_vertex(vertex, cutVectors):\n\n cutVectors[vertex] = (-1) * cutVectors[vertex]",
"def flip_cycles_mesh(mesh):\n mesh.halfedge = dict((key, {}) for key in mesh.vertices_iter())\n for fkey, face in mesh.face.iteritems():\n mesh.face[fkey] = dict((nbr, key) for key, nbr in face.items())\n for u, v in face.iteritems():\n mesh.halfedge[v][u] = fkey\n if v not in mesh.halfedge[u]:\n mesh.halfedge[u][v] = None",
"def cyclic_sort_vertices_2d(Vlist):\n if len(Vlist)==0: return Vlist\n\n adjacency_matrix = Vlist[0].polyhedron().vertex_adjacency_matrix()\n result = [ Vlist.pop() ]\n while len(Vlist)>0:\n for i in range(len(Vlist)):\n if adjacency_matrix[Vlist[i].index(), result[-1].index()] == 1:\n result.append( Vlist.pop(i) )\n break;\n else:\n raise ValueError\n return result",
"def test_swap_weight(self, dim):\r\n graph = nx.complete_graph(dim)\r\n graph.remove_edge(dim - 1, dim - 3)\r\n graph.remove_edge(dim - 2, dim - 4)\r\n s = list(range(dim - 2))\r\n weights = list(range(dim))\r\n result = set(clique.swap(s, graph, node_select=weights))\r\n expected = set(range(dim - 3)) | {dim - 1}\r\n assert result == expected",
"def _rswap(i, j, S):\n N = copy.deepcopy(S)\n row = copy.deepcopy(N[i])\n N[i] = copy.deepcopy(N[j])\n N[j] = row\n return N"
] | [
"0.6700525",
"0.6548219",
"0.62071115",
"0.619347",
"0.6155958",
"0.61089027",
"0.6073555",
"0.6059136",
"0.6011071",
"0.59793395",
"0.5920292",
"0.591253",
"0.59060276",
"0.5894991",
"0.58805555",
"0.5820859",
"0.5787564",
"0.5771385",
"0.57236433",
"0.57217413",
"0.5709025",
"0.56853545",
"0.5668851",
"0.5668137",
"0.56564784",
"0.5648565",
"0.5632435",
"0.5571116",
"0.55704314",
"0.55592334"
] | 0.6623225 | 1 |
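On plain tuples, the swap performed by vertex_cyclic_swap above reduces to a cyclic rotation that moves the last i coordinates to the front; a small sketch under that simplification (hypothetical helper name, no Sage vectors or immutability handling):

def cyclic_shift(vectors, i):
    # Rotate every tuple so that its last i entries come first.
    if i == 0 or not vectors:
        return tuple(vectors)
    return tuple(v[-i:] + v[:-i] for v in vectors)

# Shifting by 2 moves the two trailing zeros to the front of each vector.
assert cyclic_shift([(1, 2, 0, 0)], 2) == ((0, 0, 1, 2),)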
Creates a dashboard of plots for time steps, potential, kinetic, and total energy | def create_dashboard(h, t, k, p):
plt.style.use('seaborn')
# Initialize the dashboard
fig = plt.figure(figsize=(20, 8))
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
ax4 = fig.add_subplot(2, 2, 4)
# Create individual graphs
dt_line, = ax1.plot(h, lw=3, c='k')
total_line, = ax2.plot(t, lw=3, c='#d62728')
k_line, = ax3.plot(k, lw=3, c='#1f77b4')
p_line, = ax4.plot(p, lw=3, c='#2ca02c')
ax1.set_title(r'Variation in $\Delta t$')
ax1.set_ylabel(r'$\Delta t$')
ax2.set_title(r'Total Energy over Time')
ax2.set_ylabel('Total Energy')
ax3.set_title('Kinetic Energy over Time')
ax3.set_ylabel('Kinetic Energy')
ax3.set_xlabel('Time Steps')
ax4.set_title('Potential Energy over Time')
ax4.set_ylabel('Potential Energy')
ax4.set_xlabel('Time Steps')
plt.show()
"""im = ax[0, 0].imshow(model.lattice, cmap='Greys', vmin=-1, vmax=1)
energy_line, = ax[0, 1].plot([], [], lw=3)
mag_line, = ax[1, 0].plot([], [], lw=3)
heat_line, = ax[1, 1].plot([], [], lw=3)
susceptibility_line, = ax[2, 0].plot([], [], lw=3)
acceptance_line, = ax[2, 1].plot([], [], lw=3)""" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin",
"def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n 
plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")",
"def visualize(self):\n print('{0} is {1} time steps old'.format(self.name, self.timestep))\n\n self.amygdala.visualize(self.timestep, self.name, self.log_dir)\n self.cerebellum.visualize(self.name, self.log_dir)\n self.cingulate.visualize(self.name, self.log_dir)\n self.hippocampus.visualize(self.name, self.log_dir)\n #self.ganglia.visualize(self.name, self.log_dir)\n #self.cortex.visualize(self.name, self.log_dir)",
"def main():\n # Load properties that will be needed\n store = [Storage.Storage(2), Storage.Storage(4)] \n pre_energy = [s.get(\"free_energy\") for s in store]\n post_energy = [s.get(\"post_energy\") for s in store]\n x_range = store[0].get(\"x_range\")\n xlocs = np.arange(x_range[0], x_range[1], x_range[2])\n y_range = store[0].get(\"y_range\")\n ylocs = np.arange(y_range[0], y_range[1], y_range[2])\n # Calculate step size\n xb2steps = stepsize(pre_energy[0], post_energy[0], xlocs) \n xb4steps = stepsize(pre_energy[1], post_energy[1], xlocs) \n # Set up the figure\n fig = plt.figure(1, figsize=(7.5,2.5)) \n axe = (fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2))\n # Plot the results\n axe[0].plot(ylocs, xb4steps, color='#FF466F', lw=4)\n axe[1].plot(ylocs, xb2steps, color='#76D753', lw=4)\n # Annotate the plots\n axe[0].set_title(\"4sXB step size\")\n axe[0].set_xlabel(\"Lattice spacing (nm)\") \n axe[0].set_ylabel(\"Step size (nm)\")\n axe[0].set_xlim((25.5, 39))\n axe[0].set_ylim((1, 8))\n axe[1].set_title(\"2sXB step size\")\n axe[1].set_xlabel(\"Lattice spacing (nm)\") \n axe[1].set_ylabel(\"Step size (nm)\")\n axe[1].set_xlim((25.5, 39))\n axe[1].set_ylim((1, 8))\n # Display the plots\n fig.subplots_adjust(wspace=0.25, hspace=0.48,\n left=0.08, right=0.98,\n top=0.85, bottom=0.21)\n plt.show()",
"def main_time_chart(self) -> Component:\n logger.debug('Generating time graph.')\n df = self.activity_manager.metadata_weekly_time_series(activity_type='run')\n\n freq_dropdown = dcc.Dropdown('overview_main_time_chart_freq_dropdown', options=[\n {'label': 'Weekly', 'value': 'weekly'},\n {'label': 'Monthly', 'value': 'monthly'}\n ], value='monthly')\n\n y_dropdown = dcc.Dropdown('overview_main_time_chart_y_dropdown', options=[\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Total distance', 'value': 'total_distance'},\n {'label': 'Total duration', 'value': 'total_duration'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Number of activities', 'value': 'activity_count'}\n ], value='activity_count')\n\n graph = dcc.Graph(\n id='overview_main_time_chart',\n figure=self.main_time_fig('weekly', 'activity_count')\n )\n return html.Div([\n html.H2('Progress over time'),\n dbc.Row([\n dbc.Col(html.Div(['Frequency:', freq_dropdown])),\n dbc.Col(html.Div(['y axis:', y_dropdown]))\n ]),\n graph\n ])",
"def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r",
"def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)",
"def display(self):\n \n # initialize SQL kit to access database\n s = SQL_Kit(self.userID, self.password, self.database)\n \n \n \"\"\" Total Activity by hour \"\"\"\n \n # get activity data\n all_date_times = self.activity().index\n\n all_days = []\n all_hours = []\n for item in all_date_times:\n all_days.append((item.timetuple().tm_yday))\n all_hours.append(item.hour)\n\n x = all_days\n y = all_hours\n x_labels = pd.Series(all_days).unique()\n\n fig1, ax1 = plt.subplots()\n ax1.set_title('Hourly Activity')\n ax1.scatter(x,y,color='mediumspringgreen',linewidths=1)\n ax1.set_xlabel('day of year')\n ax1.set_ylabel('hour')\n ax1.xaxis.grid(True)\n\n if len(x_labels) > 5:\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n else:\n ax1.xaxis.set_ticks(x_labels)\n\n ax1.yaxis.grid(False) \n plt.show()\n \n \n \"\"\" MOVING AVERAGE \"\"\"\n \n df = self.activity().reset_index()\n\n def day_of_year(datetime_entry):\n return datetime_entry.timetuple().tm_yday\n\n df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['EventDateTime']),axis=1))\n daily_count = df['day_of_year'].value_counts().sort_index()\n\n averages = []\n i=1\n for value_count in daily_count:\n values = daily_count[:i]\n average = round(sum(values)/len(values),2)\n averages.append(average)\n i+=1\n\n day_list = list(df['day_of_year'].unique())\n\n avg_move_df = pd.DataFrame([day_list,averages]).T\n avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'},inplace=True)\n avg_move_df.set_index('day_id',inplace=True)\n \n fig1, ax1 = plt.subplots()\n ax1.plot(avg_move_df.index.astype(int),avg_move_df['moving_avg'], color='mediumspringgreen')\n ax1.set_title('Moving AVG')\n ax1.set_xlabel('day_of_year')\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n ax1.set_ylabel('Daily Activity')\n plt.show()\n \n \n \n \"\"\" Top 5 Samples \"\"\"\n \n data = s.select_table('sample')['SoundCategory'].value_counts()\n \n objects = list(data)[:5]\n y_pos = list(data.index)[:5]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Sound Category')\n plt.title('Top 5 Samples')\n plt.show()\n \n \n \"\"\" Top 3 Chords \"\"\"\n \n data = s.select_table('chord')['ChordLabel'].value_counts()\n\n objects = list(data)[:3]\n y_pos = list(data.index)[:3]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Chord Label')\n plt.title('Top 3 Chords')\n plt.show()\n \n \n \"\"\" Top 3 Wave Types \"\"\"\n \n # get SQL table data\n set_1 = s.select_table('createwave')\n set_2 = s.select_table('sequence')\n set_3 = s.select_table('arpeggio')\n set_4 = s.select_table('chord')\n\n # concat tables into single pandas series\n all_wave_types = pd.concat([set_1['WaveType'], set_2['WaveType'], set_3['WaveType'], set_4['WaveType']])\n\n # sort values, show top 3\n top_3 = all_wave_types.value_counts().head(3)\n\n\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = list(top_3.index)\n sizes = list(top_3.values)\n explode = (0, 0, 0) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, colors=['g','b','r'], startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n ax1.set_title('Top Wave Types')\n\n plt.show()",
"def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()",
"def main():\n save = False\n show = True\n\n #hd_parameter_plots = HDparameterPlots(save=save)\n #hd_parameter_plots.flow_parameter_distribution_for_non_lake_cells_for_current_HD_model()\n #hd_parameter_plots.flow_parameter_distribution_current_HD_model_for_current_HD_model_reprocessed_without_lakes_and_wetlands()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs()\n #hd_parameter_plots.flow_parameter_distribution_ten_minute_data_from_virna_0k_ALG4_sinkless_no_true_sinks_oceans_lsmask_plus_upscale_rdirs_no_tuning()\n #ice5g_comparison_plots = Ice5GComparisonPlots(save=save)\n #ice5g_comparison_plots.plotLine()\n #ice5g_comparison_plots.plotFilled()\n #ice5g_comparison_plots.plotCombined()\n #ice5g_comparison_plots.plotCombinedIncludingOceanFloors()\n #flowmapplot = FlowMapPlots(save)\n #flowmapplot.FourFlowMapSectionsFromDeglaciation()\n #flowmapplot.Etopo1FlowMap()\n #flowmapplot.ICE5G_data_all_points_0k()\n #flowmapplot.ICE5G_data_all_points_0k_no_sink_filling()\n #flowmapplot.ICE5G_data_all_points_0k_alg4_two_color()\n #flowmapplot.ICE5G_data_all_points_21k_alg4_two_color()\n #flowmapplot.Etopo1FlowMap_two_color()\n #flowmapplot.Etopo1FlowMap_two_color_directly_upscaled_fields()\n #flowmapplot.Corrected_HD_Rdirs_FlowMap_two_color()\n #flowmapplot.ICE5G_data_ALG4_true_sinks_21k_And_ICE5G_data_ALG4_true_sinks_0k_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_sinkless_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_data_ALG4_no_true_sinks_corr_orog_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Corrected_HD_Rdirs_And_ICE5G_HD_as_data_ALG4_true_sinks_0k_directly_upscaled_fields_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplot.Ten_Minute_Data_from_Virna_data_ALG4_corr_orog_downscaled_lsmask_no_sinks_21k_vs_0k_FlowMap_comparison()\n #flowmapplot.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n flowmapplotwithcatchment = FlowMapPlotsWithCatchments(save)\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_virna_data_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.compare_lgm_river_directions_with_catchments_virna_data_with_vs_without_tarasov_style_orog_corrs()\n #flowmapplotwithcatchment.Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n 
#flowmapplotwithcatchment.upscaled_rdirs_with_and_without_tarasov_upscaled_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #upscaled_rdirs_with_and_without_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_downscaled_ls_mask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.\\\n #Upscaled_Rdirs_vs_Corrected_HD_Rdirs_tarasov_upscaled_north_america_only_data_ALG4_corr_orog_glcc_olson_lsmask_0k_FlowMap_comparison()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE5G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_present_day_and_lgm_river_directions_with_catchments_ICE6G_plus_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_ICE5G_and_ICE6G_with_catchments_tarasov_style_orog_corrs_for_both()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs()\n #flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_original_ts()\n flowmapplotwithcatchment.compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs_new_ts_10min()\n outflowplots = OutflowPlots(save)\n #outflowplots.Compare_Upscaled_Rdirs_vs_Directly_Upscaled_fields_ICE5G_data_ALG4_corr_orog_downscaled_ls_mask_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_sinkless_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_as_HD_data_ALG4_true_sinks_all_points_0k()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_sinkless_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_true_sinks_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_ALG4_corr_orog_downscaled_ls_mask_all_points_0k_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_sinkless_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_Etopo1_ALG4_true_sinks_directly_upscaled_fields()\n #outflowplots.Compare_Corrected_HD_Rdirs_And_ICE5G_plus_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k()\n outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks()\n #outflowplots.Compare_Original_Corrections_vs_Upscaled_MERIT_DEM_0k_new_truesinks_individual_rivers()\n #outflowplots.Compare_ICE5G_with_and_without_tarasov_upscaled_srtm30_ALG4_corr_orog_0k_directly_upscaled_fields()\n #hd_output_plots = HDOutputPlots()\n #hd_output_plots.check_water_balance_of_1978_for_constant_forcing_of_0_01()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data()\n #hd_output_plots.plot_comparison_using_1990_rainfall_data_adding_back_to_discharge()\n #coupledrunoutputplots = CoupledRunOutputPlots(save=save)\n #coupledrunoutputplots.ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.ocean_grid_extended_present_day_rdirs_vs_ice6g_rdirs_lgm_run_discharge_plot()\n #coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_echam()\n 
#coupledrunoutputplots.extended_present_day_rdirs_vs_ice6g_rdirs_lgm_mpiom_pem()\n #lake_plots = LakePlots()\n #lake_plots.plotLakeDepths()\n #lake_plots.LakeAndRiverMap()\n #lake_plots.LakeAndRiverMaps()\n if show:\n plt.show()",
"def show_dashboard():\n script, div = plots.make_plot()\n script_tab, div_tab = plots.make_tabs()\n script_trend, div_trend = plots.make_trend()\n\n return render_template('layout.html',\n script=script,\n div=div,\n script_trend=script_trend,\n div_trend=div_trend,\n script_tab=script_tab,\n div_tab=div_tab)",
"def send_to_dashboard():\n\t# purchased and conventional\n\tpurchase_activity_conv = db.session.query(PurchaseActivity).filter_by(purchased=True, conventional=True).all()\n\t# purchased and organic\n\tpurchase_activity_organic = db.session.query(PurchaseActivity).filter_by(purchased=True, organic=True).all()\n\t# search activity \n\tsearch_activity = db.session.query(SearchActivity).all()\n\n\t# plot search_activity over time and purchase_activity over time\n\n\tlist_of_dict = []\n\n\n\tdata = {}\n\tdatasets_dict = {}\n\tdatasets_dict['label'] = \"Search Activity, Items Purchased over Time\"\n\tdatasets_dict['fillColor'] = \"rgba(220,220,220,0.5)\"\n\tdatasets_dict['strokeColor'] = \"rgba(220,220,220,0.8)\"\n\tdatasets_dict['highlightFill'] = \"rgba(220,220,220,0.75)\"\n\tdatasets_dict['highlightStroke'] = \"rgba(220,220,220,1)\"\n\tdatasets_dict['data'] =search_activity, purchase_activity_organic, purchase_activity_conv\n\tdata['labels'] = time\n\tdata['datasets'] = [datasets_dict]\n \n\n\tlist_of_dict.append(data)\n\tprint list_of_dict \t\n\n\treturn render_template(\"/dashboard.html\")",
"def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()",
"def experimental_report(environment, species, time_series,path=None,events=None):\n\n\n M = len(environment)+1\n L = int(np.ceil(1 + len(time_series)/2))\n fig = plt.figure(figsize=(5*M,5*L))\n \n colormaps = [\"Greens\",\"bwr\",\"Blues\",\"Oranges\",\"RdPu\",\"Reds\"]\n for i,(k,v) in enumerate(environment):\n plt.subplot(L,M,i+1)\n plt.imshow(v,\n interpolation='None',\n cmap=colormaps[i%len(colormaps)],\n vmin=0,vmax=1,\n aspect=\"equal\")\n plt.xticks([])\n plt.yticks([])\n plt.title(k)\n plt.colorbar(orientation=\"horizontal\", fraction=0.045)\n plt.subplot(L,M,M)\n niches(species,path=path)\n\n colors = [\"blue\",\"green\",\"brown\",\"purple\",\"red\"]\n host = [host_subplot(L*100+10+2+j, axes_class=AA.Axes) for j in range(L-1)]\n\n\n for i,(k,v) in enumerate(time_series):\n #if False and i%2 != 0:\n # ax = host[int(i/2)].twinx()\n #else:\n ax = host[int(i/2)]\n ax.set_ylabel(k)\n if len(v) == 2:\n T = len(v[0])\n ax.plot(v[0],\n label=k,\n color=colors[i%len(colors)],\n linewidth=2)\n ax.fill_between(range(len(v[0])),\n v[0]-v[1], v[0]+v[1],\n alpha=0.3,\n color=colors[i%len(colors)])\n else:\n T = len(v)\n ax.plot(range(len(v)),v, color=colors[i%len(colors)], label=k)\n \n \n for h in host:\n h.set_xlim((0,T-1))\n h.legend()\n h.set_xlabel(\"Time\")\n \n h.set_ymargin(0.05)\n h.autoscale(enable=True, axis=u'both', tight=False)\n\n if events is not None:\n h.vlines(events,*h.get_ylim(),alpha=0.1)",
"def plot_energies(self):\n plt.plot(self.energies[0], self.energies[1])\n plt.xlabel('Time (s)')\n plt.ylabel('Energy (J)')\n plt.show()",
"def plot_steps(out_dict, units):\n from bokeh.models import BoxAnnotation\n from bokeh.plotting import figure, show, output_notebook\n import bokeh.models as bmd\n\n tooltips = [\n (\"Step (total)\", \"@index\"),\n (\"Step (stage)\", \"@step\"),\n (\"Energy\", \"@energy eV/atom\"),\n (\"Energy (dispersion)\", \"@dispersion_energy_au Ha\"),\n (\"SCF converged\", \"@scf_converged\"),\n (\"Cell A\", \"@cell_a_angs Angs\"),\n (\"Cell Vol\", \"@cell_vol_angs3 Angs^3\"),\n (\"MAX Step\", \"@max_step_au Bohr\"),\n (\"Pressure\", \"@pressure_bar bar\")\n ]\n hover = bmd.HoverTool(tooltips=tooltips)\n TOOLS = [\"pan\", \"wheel_zoom\", \"box_zoom\", \"reset\", \"save\", hover]\n\n natoms = out_dict['natoms']\n values = [ x/natoms*ha2u[units] for x in out_dict['step_info']['energy_au'] ]\n values = [ x-min(values) for x in values ]\n\n data = bmd.ColumnDataSource(data=dict( index=range(len(values)),\n step=out_dict['step_info']['step'],\n energy=values,\n dispersion_energy_au=out_dict['step_info']['dispersion_energy_au'],\n scf_converged=out_dict['step_info']['scf_converged'],\n cell_a_angs=out_dict['step_info']['cell_a_angs'],\n cell_vol_angs3=out_dict['step_info']['cell_vol_angs3'],\n max_step_au=out_dict['step_info']['max_step_au'],\n pressure_bar=out_dict['step_info']['pressure_bar'],\n ))\n\n p = figure(tools=TOOLS, title='Energy profile of the DFT minimization',\n height=350, width=550)\n\n p.xgrid.grid_line_color=None\n p.xaxis.axis_label = 'Steps'\n p.yaxis.axis_label = 'Energy ({}/atom)'.format(units)\n\n # Colored background\n colors = ['red','orange','green','yellow','cyan','pink','palegreen']\n start = 0\n for i,steps in enumerate(out_dict['stage_info']['nsteps']):\n end = start+steps\n p.add_layout(BoxAnnotation(left=start, right=end, fill_alpha=0.2, fill_color=colors[i]))\n start = end\n\n # Trace line and markers\n p.line('index', 'energy', source=data, line_color='blue')\n p.circle('index', 'energy', source=data, line_color='blue', size=3)\n return p",
"def page_dashboard(state):\n\n st.title(\":chart_with_upwards_trend: Prediction Results Dashboard\")\n\n st.markdown(\"# Select Stocks to View Results:\")\n if state.finalized_data:\n for stock_data in state.finalized_data:\n st.write(\"---\")\n st.markdown(\"## \" + stock_data[\"stock\"])\n if st.checkbox(\"View Results for \" + stock_data[\"stock\"]):\n\n ############################################\n\n st.markdown(\"### Historical Predictions:\")\n\n df2 = pd.DataFrame.from_dict(stock_data[\"prev_predictions\"])\n\n select_lbl = (\n \"Enter the names of models for \" + stock_data[\"stock\"] + \":\"\n )\n models_selections = st.multiselect(\n label=select_lbl,\n options=df2.columns,\n ) # allow users to display specific model results on dataframe graph\n\n if not models_selections: # if nothing is selected show all models!\n st.line_chart(df2)\n else:\n st.line_chart(df2[models_selections])\n\n st.markdown(\n \"*Note:* 'Prices' are the actual prices for those days. The rest are model predictions for those days.\\nPrices (in USD) are on the y-axis, the day number in the data is on the x-axis.\"\n )\n\n ############################################\n\n st.markdown(\"### Future (Next-Day) Predictions:\")\n\n df = pd.DataFrame()\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"swing_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"next_day_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame([stock_data[\"prediction_results\"][\"model_scores\"]])\n )\n\n df.index = [\n \"Swing Predicton\",\n \"Price Prediction ($)\",\n \"Model Fit Score\",\n ]\n df = df.transpose()\n df # display chart\n\n st.markdown(\n \"- The current price of the stock is *$\"\n + str(\n round(stock_data[\"prediction_results\"][\"current_prev_close\"], 2)\n )\n + \"*.\"\n )\n\n if state.period == \"1mo\":\n st.markdown(\"- *Recommended Model (for 1mo):* SVR-RBF\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"6mo\":\n st.markdown(\n \"- *Recommended Model (for 6mo):* SVR-Poly (most recommended), LR, EN, or Lasso.\"\n )\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"1y\":\n st.markdown(\"- *Recommended Model (for 1yr):* SVR-Poly\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n else:\n st.markdown(\n \"- *Note:* View the home screen for information about the best models and training data size combinations.\"\n )\n\n ############################################\n st.markdown(\"### View Other Information:\")\n\n if st.checkbox(\n \"View \" + stock_data[\"stock\"] + \"'s Model Efficiency Timings\"\n ):\n st.markdown(\"#### Model Efficiencies:\")\n st.markdown(\n \"Shows the time in seconds it took models to complete specific tasks:\"\n )\n df3 = pd.DataFrame()\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"training_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"testing_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"new_predictions_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"prev_predictions_times\"]]\n )\n )\n df3.index = [\n \"Training\",\n \"Testing/Scoring\",\n \"Future Predictions\",\n \"Historical Predictions\",\n ]\n df3 = df3.transpose()\n 
df3\n\n ############################################\n\n if st.checkbox(\"View \" + stock_data[\"stock\"] + \"'s Information\"):\n st.markdown(\"#### Company Information:\")\n for key in stock_data[\"stock_info\"].keys():\n st.write(\"*\", key + \":\", stock_data[\"stock_info\"][key])\n else:\n st.markdown(\n \"## Generate data to populate and initialize this page by going to the 'Settings' page and running the tool!\"\n )",
"def historial():\r\n global EnergiaK, EnergiaP, EnergiaT\r\n \r\n t = dt*np.arange(npasos_temporales+1)\r\n plt.figure('Energias del sistema')\r\n plt.title('Energies')\r\n plt.plot(t, EnergiaP, 'b', label='Potential')\r\n plt.plot(t, EnergiaK, 'r', label='Kinetic')\r\n plt.plot(t, EnergiaT, 'black', label='Total')\r\n plt.xlabel('t', fontsize = 18)\r\n plt.xticks(np.linspace(0,14,6), fontsize = 18)\r\n plt.yticks(np.linspace(0,35e-7,6), fontsize = 18)\r\n plt.ylim(0,40e-7)\r\n plt.xlim(0,14)\r\n plt.legend(loc=1)\r\n plt.ticklabel_format(style = 'sci', axis = 'y', scilimits = (0,0))\r\n plt.figure('Potential Energy')\r\n plt.plot(t, EnergiaP, 'b')\r\n plt.xlabel('t', fontsize = 18)\r\n plt.ylabel('Ex Energy', fontsize = 18)\r\n plt.xticks(np.linspace(0,100,11), fontsize = 18)\r\n plt.yticks(np.linspace(0,16,8), fontsize = 18)\r\n plt.xlim(0,100)\r\n plt.ylim(0,25)\r\n if os.path.exists(\"Energias\") and\\\r\n os.path.isfile(\"Energias/Energias.png\")==\\\r\n True:\r\n os.remove(\"Energias/Energias.png\") \r\n plt.savefig('Energias.png',dpi=720)\r\n shutil.move('Energias.png',\"Energias\")\r\n os.remove(\"Energias/energies.out\")\r\n # Escribe y guarda el archivo con los valores de la energia en el tiempo:\r\n sp.savetxt('energies.out', sp.column_stack((t,EnergiaP,EnergiaK,EnergiaT)),fmt=('%1.4e','%1.4e','%1.4e','%1.4e')) \r\n shutil.move('energies.out',\"Energias\") \r\n \r\n else:\r\n os.mkdir(\"Energias\")\r\n plt.savefig('Energias.png',dpi=720)\r\n shutil.move('Energias.png',\"Energias\") \r\n # Escribe y guarda el archivo con los valores de la energia en el tiempo:\r\n sp.savetxt('energies.out', sp.column_stack((t,EnergiaP,EnergiaK,EnergiaT)),fmt=('%1.4e','%1.4e','%1.4e','%1.4e')) \r\n shutil.move('energies.out',\"Energias\")",
"def main(args=None):\n if args is None:\n args = sys.argv[1:]\n parsed_args = parse_arguments(arguments=args)\n\n conn = connect_to_database(db_path=parsed_args.database)\n c = conn.cursor()\n\n scenario_id, scenario = get_scenario_id_and_name(\n scenario_id_arg=parsed_args.scenario_id,\n scenario_name_arg=parsed_args.scenario,\n c=c,\n script=\"capacity_total_plot\",\n )\n\n tech_colors = get_tech_colors(c)\n tech_plotting_order = get_tech_plotting_order(c)\n power_unit = get_unit(c, \"power\")\n\n plot_title = \"{}Total Capacity by Period - {} - Subproblem {} - Stage {}\".format(\n \"{} - \".format(scenario) if parsed_args.scenario_name_in_title else \"\",\n parsed_args.load_zone,\n parsed_args.subproblem,\n parsed_args.stage,\n )\n\n # TODO: is this used?\n plot_name = \"TotalCapacityPlot-{}-{}-{}\".format(\n parsed_args.load_zone, parsed_args.subproblem, parsed_args.stage\n )\n\n df = get_plotting_data(\n conn=conn,\n scenario_id=scenario_id,\n load_zone=parsed_args.load_zone,\n subproblem=parsed_args.subproblem,\n stage=parsed_args.stage,\n )\n\n source, x_col_reordered = process_stacked_plot_data(\n df=df,\n y_col=\"capacity_mw\",\n x_col=[\"period\", \"scenario\"],\n category_col=\"technology\",\n )\n\n # Multi-level index in CDS will be joined into one column with \"_\" separator\n x_col_cds = \"_\".join(x_col_reordered)\n x_col_label = \", \".join([x.capitalize() for x in x_col_reordered])\n plot = create_stacked_bar_plot(\n source=source,\n x_col=x_col_cds,\n x_label=x_col_label,\n y_label=\"Capacity ({})\".format(power_unit),\n category_label=\"Technology\",\n category_colors=tech_colors,\n category_order=tech_plotting_order,\n title=plot_title,\n ylimit=parsed_args.ylimit,\n )\n\n # Show plot in HTML browser file if requested\n if parsed_args.show:\n show_plot(\n plot=plot,\n plot_name=plot_name,\n plot_write_directory=parsed_args.plot_write_directory,\n scenario=scenario,\n )\n\n # Return plot in json format if requested\n if parsed_args.return_json:\n return json_item(plot, \"plotHTMLTarget\")",
"def _run():\n\n temperatures_kelvins = _create_temperature_grid()\n first_derivs_kelvins_pt01 = numpy.gradient(temperatures_kelvins)\n second_derivs_kelvins_pt01 = numpy.gradient(\n numpy.absolute(first_derivs_kelvins_pt01)\n )\n\n this_ratio = (\n numpy.max(temperatures_kelvins) /\n numpy.max(first_derivs_kelvins_pt01)\n )\n\n first_derivs_unitless = first_derivs_kelvins_pt01 * this_ratio\n\n this_ratio = (\n numpy.max(temperatures_kelvins) /\n numpy.max(second_derivs_kelvins_pt01)\n )\n\n second_derivs_unitless = second_derivs_kelvins_pt01 * this_ratio\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n temperature_handle = axes_object.plot(\n temperatures_kelvins, color=TEMPERATURE_COLOUR, linestyle='solid',\n linewidth=SOLID_LINE_WIDTH\n )[0]\n\n second_deriv_handle = axes_object.plot(\n second_derivs_unitless, color=SECOND_DERIV_COLOUR, linestyle='solid',\n linewidth=SOLID_LINE_WIDTH\n )[0]\n\n first_deriv_handle = axes_object.plot(\n first_derivs_unitless, color=FIRST_DERIV_COLOUR, linestyle='dashed',\n linewidth=DASHED_LINE_WIDTH\n )[0]\n\n this_min_index = numpy.argmin(second_derivs_unitless)\n second_derivs_unitless[\n (this_min_index - 10):(this_min_index + 10)\n ] = second_derivs_unitless[this_min_index]\n\n tfp_handle = axes_object.plot(\n -1 * second_derivs_unitless, color=TFP_COLOUR, linestyle='dashed',\n linewidth=DASHED_LINE_WIDTH\n )[0]\n\n axes_object.set_yticks([0])\n axes_object.set_xticks([], [])\n\n x_label_string = r'$x$-coordinate (increasing to the right)'\n axes_object.set_xlabel(x_label_string)\n\n legend_handles = [\n temperature_handle, first_deriv_handle, second_deriv_handle,\n tfp_handle\n ]\n\n legend_strings = [\n TEMPERATURE_LEGEND_STRING, FIRST_DERIV_LEGEND_STRING,\n SECOND_DERIV_LEGEND_STRING, TFP_LEGEND_STRING\n ]\n\n axes_object.legend(legend_handles, legend_strings, loc='lower right')\n\n print 'Saving figure to file: \"{0:s}\"...'.format(OUTPUT_FILE_NAME)\n pyplot.savefig(OUTPUT_FILE_NAME, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()",
"def plot(self):\n # get data without totals\n data = self.woe_report[self.woe_report.index != 'total']\n # setup panel\n fig, axs = plt.subplots(1, 3, figsize=(12, 3))\n plt.subplots_adjust(wspace=0.3)\n # first chart\n data['P(Hi|A)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n data['P(Hi|Ā)'].plot(ax=axs[0], linewidth=3, alpha=0.7)\n axs[0].set_title('Probability distribution')\n axs[0].set_xlabel(data.index.name)\n axs[0].set_ylabel('probability')\n axs[0].legend(['P(Hi|A)', 'P(Hi|Ā)'])\n # second chart\n data['weight-of-evidence'].plot(ax=axs[1], linewidth=3, alpha=0.7)\n axs[1].set_title('WoE')\n axs[1].set_xlabel(data.index.name)\n axs[1].set_ylabel('WoE')\n # third chart\n data['information-value'].plot(ax=axs[2], linewidth=3, alpha=0.7)\n axs[2].set_title('Information value')\n axs[2].set_ylabel('IV')",
"def plotter(self, Result, outcome):\n # Plot results time histories\n fig, axs = plt.subplots(2, 3, figsize=(20, 10))\n axs = axs.reshape(-1)\n axs[0].plot(Result.time, Result.velocity)\n axs[1].plot(Result.time, Result.mass)\n axs[2].plot(Result.time, Result.angle)\n axs[3].plot(Result.time, Result.altitude)\n axs[4].plot(Result.time, Result.distance)\n axs[5].plot(Result.time, Result.radius)\n axs[0].set_title('velocity (m/s) vs time (s)', fontsize=16)\n axs[1].set_title('mass (kg) vs time (s)', fontsize=16)\n axs[2].set_title('angle (rad) vs time (s)', fontsize=16)\n axs[3].set_title('altitude (m) vs time (s)', fontsize=16)\n axs[4].set_title('distance (m) vs time (s)', fontsize=16)\n axs[5].set_title('radius (m) vs time (s)', fontsize=16)\n plt.tight_layout()\n\n # Plot energy deposition curve\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n ax.plot(Result.dedz, Result.altitude / 1e3)\n ax.set_xlabel('Energy per unit height [Kt/km]', fontsize=14)\n ax.set_ylabel('Altitude [km]', fontsize=14)\n plt.show()",
"def render(self, agents, episode):\n fig = plt.figure(figsize=(12, 12))\n fig.suptitle(f\"Episode {episode}\")\n gs = gridspec.GridSpec(2, 2)\n\n a_thousand = 1000\n a_million = 1000000\n\n # Price Process\n ax = plt.subplot(gs[0, 0])\n ax.set(title=\"Price Process\")\n plt.plot(self.step_array * self.tau, self.S_tilde, label=\"Price Including Temporary Price Impact\")\n plt.plot(self.step_array * self.tau, self.S, label=\"Price\")\n plt.legend()\n ax.set(ylabel=\"Price\")\n ax.grid(True)\n\n # Revenue Process\n ax = plt.subplot(gs[0, 1])\n ax.set(title=\"Revenue Process\")\n for a in range(len(agents)):\n plt.plot(self.step_array * self.tau, agents[a].R / a_thousand, label=f\"Agent {a+1}\")\n plt.legend()\n ax.grid(True)\n ax.set(ylabel=\"Revenue ($k)\")\n ax.set(xlabel=\"Time Step\")\n\n # Inventory Process\n ax = plt.subplot(gs[1, :])\n ax.set(title=\"Inventory Process\")\n for a in range(len(agents)):\n plt.plot(self.step_array * self.tau, agents[a].x / a_million, label=f\"Agent {a+1}\")\n plt.legend()\n ax.grid(True)\n ax.set(ylabel=\"Inventory (M)\")\n ax.set(xlabel=\"Time Step\")\n\n filename = PPODirectories.tmp + f\"episode-{episode}-simulation.png\"\n\n plt.savefig(filename)\n plt.close()\n\n return filename",
"def generate_statistics_plots(graph_name, graph_steps):\n df_final_situation = pd.DataFrame(columns=[\"type\", \"value\"])\n df_step = pd.DataFrame(columns=[\"type\", \"step\", \"value\"])\n df_exposed = pd.DataFrame(columns=[\"step\", \"type\", \"value\"])\n\n st.markdown(\"\")\n\n for i in range(graph_steps):\n # read graph and print stats\n graph_result_path = \"./data/output/\"\n G = nx.read_gexf(f\"{graph_result_path}G_{graph_name}_step{i}.gexf\")\n print_stats(G, i, graph_name)\n\n # LINE CHART (append informations into dataframe)\n df_step = df_step.append(\n {\"type\": \"not_exposed\", \"step\": i, \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"exposed\", \"step\": i, \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"infected\", \"step\": i, \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n line_chart = px.line(\n df_step,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Infection overall: {graph_name} step: {i}\",\n )\n\n # BAR CHART (append informations into dataframe)\n df_exposed = df_exposed.append(\n {\n \"step\": i,\n \"type\": \"opinion_leader\",\n \"value\": cn.count_exposed_opinion_leader(G),\n },\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"bot\", \"value\": cn.count_exposed_bot(G)},\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"user\", \"value\": cn.count_exposed_user(G)},\n ignore_index=True,\n )\n bar_chart = px.bar(\n df_exposed,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Type of agents exposed: {graph_name} step: {i}\",\n )\n\n # PIE CHART (append informations into dataframe)\n if i == 4:\n df_final_situation = df_final_situation.append(\n {\"type\": \"not_exposed\", \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"exposed\", \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"infected\", \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n #### CREATE THE PLOTS\n ##Uncomment plot(..) to save the plots to disk in html format\n\n plot_folder = \"./data/plots/\"\n\n # Plotly Line Plot\n # plot(line_chart, filename=f\"{plot_folder}steps_{graph_name}.html\")\n st.plotly_chart(line_chart, use_container_width=True)\n\n # Plotly bar plot\n # plot(bar_chart, filename=f\"{plot_folder}exposed_type_{graph_name}.html\")\n st.plotly_chart(bar_chart, use_container_width=True)\n\n # Plotly final pie chart\n final_pie_chart = px.pie(\n df_final_situation, values=\"value\", names=\"type\", title=f\"Final situation plot of: {graph_name}\"\n )\n # plot(final_pie_chart, filename=f\"{plot_folder}final_situation.html\")\n st.plotly_chart(final_pie_chart, use_container_width=True)\n\n print(\"\\nStatistics calculated succesfully\")\n\n return True",
"def show_results(self):\n\n N = split_list(self.N)\n # create subplot\n fig = make_subplots(rows=1,cols=2,\n subplot_titles=('Fish population', 'Harvested fish'),\n specs=[[{'type': 'xy'}, {'type': 'pie'}]])\n #Add population line graph\n fig.add_trace(go.Scatter(y=N['odds'], x=np.linspace(1, 11, 6), name='odd year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.add_trace(go.Scatter(y=N['evens'], x=np.linspace(2, 12, 6), name='even year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.update_xaxes(title_text=\"year\", row=1, col=1)\n fig.update_yaxes(title_text=\"population\", row=1, col=1)\n\n # cannot use 'paper' as yref due to bug in sublplot.\n fig.add_shape(type='line',\n xref='x', yref='y',\n x0=2.5, y0=-10, x1=2.5, y1=1000,\n line=dict(color='Black', width=3),\n row=1, col=1)\n\n # create pie chart\n colors = ['#636EFA', '#EF553B'] \n labels = ['total odd year harvest', 'total even year harvest']\n M = split_list(self.harvest_record)\n values = [sum(M['odds']), sum(M['evens'])]\n fig.add_trace(go.Pie(labels=labels, values=values, hoverinfo='label', textinfo='value', marker=dict(colors=colors)), \n row=1, col=2)\n\n # add title\n fig.update_layout(title_text='Results') \n fig.write_html(\"fish_trap_simulation.html\")\n\n \n return fig",
"def visualise_food_consumption(data: LogData, directory: Path):\n\n figure, axes = plot.subplots()\n\n food_history = get_food_history(data)\n\n axes.plot(food_history.keys(), food_history.values(), label=\"Food\", color=\"blue\", **{\"ls\": \"--\"})\n\n axes.legend(loc=\"upper left\")\n axes.set_xlim(0, data.duration_secs())\n axes.set_xlabel(\"Time (seconds)\")\n axes.set_ylabel(\"Amount\")\n axes.set_title(\"Food availability\")\n\n plot.savefig(directory / Path(\"food_consumption.png\"))\n plot.close()",
"def charts():\n\n global show_gaps\n global timespan\n\n form = ChartForm(\n request.form,\n graph_type=timespans.index(timespan),\n graph_gaps=show_gaps\n )\n\n if request.method == 'POST':\n if form.submit_button.data:\n timespan = timespans[int(form.graph_type.data)]\n show_gaps = form.graph_gaps.data\n else:\n flash('Unknown Event', 'error')\n\n chart = Chart(app)\n data_values1, data_values2, data_values3, data_labels = \\\n chart.get_data(timespan, show_gaps)\n\n if len(data_values3) > 0:\n cb = np.array(data_values3)\n peaks = peakutils.indexes(cb, thres=0.02 / max(cb), min_dist=5)\n\n starts_total = len(peaks)\n starts_per_h = int(round(float(starts_total) / \\\n float(hourtable[timespan]), 0))\n else:\n starts_total = 0\n starts_per_h = 0\n\n return render_template(\n 'charts.html',\n form=form,\n user=current_user,\n values1=data_values1,\n values2=data_values2,\n values3=data_values3,\n labels=data_labels,\n burner_total=starts_total,\n burner_ph=starts_per_h,\n )",
"def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])",
"def charts(request):\n \n def histogram():\n x0 = np.random.randn(500)\n # Add 1 to shift the mean of the Gaussian distribution\n x1 = np.random.randn(500) + 1\n\n fig = go.Figure()\n fig.add_trace(go.Histogram(x=x0))\n fig.add_trace(go.Histogram(x=x1))\n\n # Overlay both histograms\n fig.update_layout(barmode='overlay')\n fig.update_layout(title='Histogram')\n # Reduce opacity to see both histograms\n fig.update_traces(opacity=0.75)\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n \n def box_plot():\n np.random.seed(1)\n y0 = np.random.randn(50) - 1\n y1 = np.random.randn(50) + 1\n\n fig = go.Figure()\n fig.add_trace(go.Box(y=y0))\n fig.add_trace(go.Box(y=y1))\n fig.update_layout(title='Box Plot')\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n \n def heat_map():\n \n np.random.seed(1)\n programmers = ['Alex','Nicole','Sara','Etienne','Chelsea','Jody','Marianne']\n base = datetime.datetime.today()\n dates = base - np.arange(180) * datetime.timedelta(days=1)\n z = np.random.poisson(size=(len(programmers), len(dates)))\n\n fig = go.Figure(data=go.Heatmap(\n z=z,\n x=dates,\n y=programmers,\n colorscale='Viridis'))\n\n fig.update_layout(\n title='Heat Map',\n xaxis_nticks=36)\n\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n \n def scatter():\n x1 = [1,2,3,4]\n y1 = [30, 35, 25, 45]\n text1 = ['A', 'B', 'C', 'D']\n trace = go.Scatter(\n x=x1, y = y1, text= text1, mode='markers+text'\n )\n layout = dict(\n title='Scatter Plots',\n xaxis=dict(range=[min(x1), max(x1)]),\n yaxis=dict(range=[min(y1), max(y1)])\n )\n fig = go.Figure(data=[trace],layout=layout)\n plot_div = plot(fig, output_type='div', include_plotlyjs=False)\n return plot_div\n\n context = {\n 'plot1':heat_map(),\n 'plot2':scatter(),\n 'plot3':histogram(),\n 'plot4':box_plot()\n }\n return render(request, 'base/charts.html', context)"
] | [
"0.64270926",
"0.641641",
"0.63471955",
"0.633336",
"0.6203229",
"0.6157202",
"0.6156514",
"0.60662705",
"0.60575175",
"0.60480624",
"0.6023913",
"0.5948977",
"0.5948531",
"0.5910911",
"0.589686",
"0.5895649",
"0.58862346",
"0.58814776",
"0.5880334",
"0.58726776",
"0.58645946",
"0.58641326",
"0.5848611",
"0.58399653",
"0.5827738",
"0.5824148",
"0.5818638",
"0.58007526",
"0.579874",
"0.5791964"
] | 0.77813894 | 0 |
Download and unpack the Zenodo-minted data for the current stitches distribution. | def fetch_zenodo(self):
# full path to the stitches root directory where the example dir will be stored
if self.data_dir is None:
data_directory = pkg_resources.resource_filename('stitches', 'data')
else:
data_directory = self.data_dir
# build needed subdirectories if they do not already exist
tas_data_path = os.path.join(data_directory, "tas-data")
temp_data_path = os.path.join(data_directory, "temp-data")
if not os.path.exists(tas_data_path):
os.mkdir(tas_data_path)
if not os.path.exists(temp_data_path):
os.mkdir(temp_data_path)
# get the current version of stitches that is installed
current_version = pkg_resources.get_distribution('stitches').version
try:
data_link = InstallPackageData.DATA_VERSION_URLS[current_version]
except KeyError:
msg = f"Link to data missing for current version: {current_version}. Using default version: {InstallPackageData.DEFAULT_VERSION}"
data_link = InstallPackageData.DEFAULT_VERSION
print(msg)
# retrieve content from URL
print("Downloading example data for stitches version {}. This may take a few minutes...".format(current_version))
response = requests.get(data_link)
with zipfile.ZipFile(BytesIO(response.content)) as zipped:
# extract each file in the zipped dir to the project
for f in zipped.namelist():
extension = os.path.splitext(f)[-1]
# Extract only the csv and nc files
if all([len(extension) > 0, extension in (".csv", ".nc")]):
basename = os.path.basename(f)
# Check to see if tas-data is in the file path
if "tas-data" in f:
basename = os.path.join("tas-data", basename)
out_file = os.path.join(data_directory, basename)
# extract to a temporary directory to be able to only keep the file out of the dir structure
with tempfile.TemporaryDirectory() as tdir:
# extract file to temporary directory
zipped.extract(f, tdir)
# construct temporary file full path with name
tfile = os.path.join(tdir, f)
print(f"Unzipped: {out_file}")
# transfer only the file sans the parent directory to the data package
shutil.copy(tfile, out_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise",
"def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])",
"def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))",
"def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()",
"def fetch_taiwan_ntu_dsi():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n uraw = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.nii.gz'\n ubval = 'http://dl.dropbox.com/u/2481924/tawian_ntu_dsi.bval'\n ubvec = 'http://dl.dropbox.com/u/2481924/taiwan_ntu_dsi.bvec'\n ureadme = 'http://dl.dropbox.com/u/2481924/license_taiwan_ntu_dsi.txt'\n folder = pjoin(dipy_home, 'taiwan_ntu_dsi')\n\n md5_list = ['950408c0980a7154cb188666a885a91f', # data\n '602e5cb5fad2e7163e8025011d8a6755', # bval\n 'a95eb1be44748c20214dc7aa654f9e6b', # bvec\n '7fa1d5e272533e832cc7453eeba23f44'] # license\n\n url_list = [uraw, ubval, ubvec, ureadme]\n fname_list = ['DSI203.nii.gz', 'DSI203.bval', 'DSI203.bvec', 'DSI203_license.txt']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading raw DSI data (91MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n print('See DSI203_license.txt for LICENSE.')\n print('For the complete datasets please visit :')\n print('http://dsi-studio.labsolver.org')\n\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)",
"def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)",
"def download():\n basedir = os.path.dirname(os.path.dirname(__file__))\n print(basedir)\n datadir = os.path.join(basedir,\"data/NeonTreeEvaluation/\")\n print(\"Downloading data files to {}\".format(datadir)) \n eval_url = zenodo_url(concept_rec_id=\"3723356\", datadir=datadir)",
"def electroweakinos_likelihoods_download():\n oneLbb_HEPData_URL = \"https://www.hepdata.net/record/resource/1267798?view=true\"\n targz_filename = \"oneLbb_workspaces.tar.gz\"\n response = requests.get(oneLbb_HEPData_URL, stream=True)\n assert response.status_code == 200\n with open(targz_filename, \"wb\") as file:\n file.write(response.content)\n assert (\n hashlib.sha256(open(targz_filename, \"rb\").read()).hexdigest()\n == \"64bbbef9f1aaf9e30d75c8975de4789484329b2b825d89331a6f2081661aa728\"\n )\n # Open as a tarfile\n yield tarfile.open(targz_filename, \"r:gz\")\n os.remove(targz_filename)",
"def download():\n toydata = requests.get(DATA_URL).json()\n return toydata",
"def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st",
"def download(root: str) -> None:\n for ix in [1, 2]:\n fn = f\"lizard_images{ix}.zip\"\n url = f\"https://warwick.ac.uk/fac/cross_fac/tia/data/lizard/{fn}\"\n SimpleDownloader.download(url, root)\n\n url = \"https://warwick.ac.uk/fac/cross_fac/tia/data/lizard/lizard_labels.zip\"\n SimpleDownloader.download(url, root)\n LizardDataModule.extract_zips(root, rm=True)",
"def download_data():\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def maybe_download():\n\n print(\"Downloading Inception 5h Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)",
"def download_mission(self):\n cmds = self.vehicle.commands\n cmds.download()\n # Wait until download is complete.\n cmds.wait_valid()",
"def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()",
"def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()",
"def manually_download_MNIST(DATASET_DIR):\n\n output_path = os.path.join(DATASET_DIR, \"MNIST.zip\")\n if not os.path.exists(DATASET_DIR):\n os.mkdir(DATASET_DIR)\n url = \"https://github.com/vandedok/IIC_tutorial/releases/download/v0.2/MNIST.zip\"\n print(\"Downloading MNIST...\", end=\" \")\n urllib.request.urlretrieve(url, output_path)\n print(\"Done!\")\n\n with zipfile.ZipFile(output_path, \"r\") as zip_ref:\n zip_ref.extractall(DATASET_DIR)",
"def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)",
"def download_data():\n # Download Unihan meta data for radical-stroke analysis\n os.system(' mkdir Unihan')\n os.system(' curl -O http://unicode.org/Public/UCD/latest/ucd/Unihan.zip')\n os.system(' apt-get -y install unzip')\n os.system(' unzip Unihan.zip -d Unihan/')\n os.system(' rm Unihan.zip')\n\n data_path = 'Unihan/Unihan_RadicalStrokeCounts.txt'\n assert(os.path.isfile(data_path))\n\n return data_path",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def cli(date, path, mission):\n download.main(path, mission, date)",
"def do_download() -> None:\n\n asset_dir = pycozmo.util.get_cozmo_asset_dir()\n resource_file = asset_dir / \"obb.zip\"\n\n # Check whether resources have already been downloaded.\n if os.path.exists(asset_dir / \"resources.txt\"):\n print(f\"Resources already available in {asset_dir}\")\n sys.exit(1)\n\n # Create directory structure.\n try:\n os.makedirs(asset_dir)\n except FileExistsError:\n pass\n\n print(\"Downloading...\")\n\n res = download(resource_file)\n if not res:\n print(\"ERROR: Download failed.\")\n sys.exit(2)\n\n print(\"Extracting...\")\n\n res = extract(\n resource_file,\n asset_dir / \"obb\")\n if not res:\n print(\"ERROR: Extraction failed.\")\n sys.exit(3)\n os.remove(str(resource_file))\n\n res = extract(\n asset_dir / \"obb\" / \"Android\" / \"obb\" / \"com.anki.cozmo\" / \"main.1204.com.anki.cozmo.obb\",\n asset_dir / \"..\")\n if not res:\n print(\"ERROR: Secondary extraction failed.\")\n sys.exit(4)\n shutil.rmtree(asset_dir / \"obb\")\n\n res = extract(\n asset_dir / \"cozmo_resources\" / \"sound\" / \"AudioAssets.zip\",\n asset_dir / \"cozmo_resources\" / \"sound\")\n if not res:\n print(\"ERROR: Sound extraction failed.\")\n sys.exit(5)\n\n print(f\"Resources downloaded successfully in {asset_dir}\")",
"def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')",
"def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? (download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");",
"def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()",
"def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')",
"def download_and_extract(down_dir=download_dir, url=tuda_url):\n\n wget.download(url, down_dir) \n tar_filepath = os.path.join(down_dir, \"german-speechdata-package-v2.tar.gz\")\n #with tarfile.open(tar_filepath, \"r\") as tar:\n # tar.extractall(down_dir)",
"def fetch_syn_data():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n url = 'https://dl.dropboxusercontent.com/u/5918983/'\n t1 = url + 't1.nii.gz'\n b0 = url + 'b0.nii.gz'\n \n folder = pjoin(dipy_home, 'syn_test')\n\n md5_list = ['701bda02bb769655c7d4a9b1df2b73a6', # t1\n 'e4b741f0c77b6039e67abb2885c97a78'] # b0\n\n url_list = [t1, b0]\n fname_list = ['t1.nii.gz', 'b0.nii.gz']\n\n if not os.path.exists(folder):\n print('Creating new directory %s' % folder)\n os.makedirs(folder)\n print('Downloading t1 and b0 volumes from the same session (12MB)...')\n\n for i in range(len(md5_list)):\n _get_file_data(pjoin(folder, fname_list[i]), url_list[i])\n check_md5(pjoin(folder, fname_list[i]), md5_list[i])\n\n print('Done.')\n print('Files copied in folder %s' % folder)\n else:\n print('Dataset is already in place. If you want to fetch it again, please first remove the folder %s ' % folder)"
] | [
"0.66224813",
"0.63485897",
"0.6268055",
"0.6250528",
"0.60906255",
"0.6000069",
"0.59467095",
"0.5897437",
"0.5875298",
"0.5873493",
"0.5796746",
"0.5769636",
"0.57534075",
"0.57515734",
"0.5747376",
"0.5746502",
"0.5727455",
"0.5726162",
"0.5718221",
"0.57016945",
"0.5694621",
"0.5694621",
"0.56852186",
"0.56740814",
"0.56583333",
"0.5636215",
"0.56280816",
"0.56214005",
"0.5591625",
"0.55885893"
] | 0.7480389 | 0 |
Get all morbidities by war era name. | def get_morbidities_for_war_era():
war_era_name = request.args.get('warEra')
if not war_era_name:
raise BadRequestError("warEra parameter is missing")
return datasources_service.get_morbidities_for_war_era(war_era_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_war_eras():\n return datasources_service.get_war_eras()",
"def get_movies(self):\n worlds = ['cinemaworld',\n 'filmworld']\n\n pool = Pool(2)\n movies_world = pool.map(self.get_movies_list, worlds)\n pool.close()\n pool.join()\n\n for m_world in movies_world:\n world_type = list(m_world)[0]\n if world_type == \"cinemaworld\":\n cinemaworld_movies = m_world[world_type]\n elif world_type == \"filmworld\":\n filmworld_movies = m_world[world_type]\n\n return cinemaworld_movies, filmworld_movies",
"def get_movies_list(self, world):\n api_url = self.api_url_base + '/api/{}/movies'.format(world)\n movies_dict = self.get_dict_from_apis(api_url)\n ret_dict = {world: None}\n if movies_dict is not None:\n ret_dict[world] = movies_dict['Movies']\n return ret_dict",
"def get_weathers():\n names = [\n name for name in dir(carla.WeatherParameters)\n if re.match('[A-Z].+', name)\n ]\n weathers = {x: getattr(carla.WeatherParameters, x) for x in names}\n return weathers",
"def _getAllMinistries(date):\n session = Session()\n mfilter=sql.or_( \n sql.between(date, schema.groups.c.start_date, schema.groups.c.end_date),\n sql.and_(\n (schema.groups.c.start_date < date ),\n (schema.groups.c.end_date == None)\n )\n )\n query = session.query(domain.Ministry).filter(mfilter)\n return query.all()",
"def load_towns():\n if not hasattr(g, 'towns'):\n #g.towns = run_query('select id, name from municipios')\n g.towns = get_towns()\n return g.towns",
"def query_interstate_wars(req_war_id):\n\tthis_query = Query('interstate', req_war_id)\n\tthis_query.send_query()\n\tresponse = this_query.pull_result()\n\treturn jsonify(response)\n\t#return render_template('response.html', response=response)",
"def pharmacies(self) -> PharmasiesList:\n data = self.get(\"minhealth_pharmacies\")\n \n ls = [Pharmacies(**pharm) for pharm in data]\n return PharmasiesList(items=ls)",
"def mlbwar(self, irc, msg, args, opttype):\n \n opttype = opttype.lower()\n \n wartypelist = ['overall','pitching','offense','fielding']\n \n if opttype not in wartypelist:\n irc.reply(\"WAR type must be one of: %s\" % wartypelist)\n return\n \n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL21sYi8=')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except: \n irc.reply(\"Failed to open: %s\" % url)\n return\n \n soup = BeautifulSoup(html)\n regexString = 'war' + opttype + '.*?' # build regex ourselves for searching.\n div = soup.find('div', attrs={'id':re.compile(regexString)})\n\n table = div.find('table')\n rows = table.findAll('tr')[1:] # skip header.\n\n append_list = []\n\n for row in rows:\n rank = row.find('td')\n player = rank.findNext('td')\n team = player.findNext('td')\n war = team.findNext('td')\n append_list.append(ircutils.bold(player.getText()) + \" (\" + team.getText() + \") \" + war.getText())\n\n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} {1} :: {2}\".format(ircutils.mircColor(\"WAR Leaders for:\", 'red'), ircutils.underline(opttype.title()), descstring)\n \n irc.reply(output)",
"def get(self, cityname):\n response = hereService.getWeatherByCity(cityname)\n return response",
"def get_marcels(goalies, date, df):\n goalies_marcels = []\n for goalie in goalies:\n goalie_marcels = marcels_players(goalie, date, df)\n goalies_marcels.append({\"goalie\": goalie, \"adj_fsv\": goalie_marcels['fsv'], \"gp\": goalie_marcels['gp']})\n\n return goalies_marcels",
"def get_all(self, name):\n\t\tpass",
"def get_boards(trello_client, board_filter=None, board_name=None):\n all_boards = trello_client.list_boards(board_filter=board_filter)\n\n if board_name is not None:\n all_boards = [b for b in all_boards if board_name == b.name]\n\n logger.debug(\"{} boards loaded using '{}' filter\".format(len(all_boards), board_filter))\n\n return all_boards",
"def get_all_movies(self):\n cinemaworld_movies, filmworld_movies = self.get_movies()\n\n if cinemaworld_movies is not None:\n self.get_title_map(cinemaworld_movies, \"cinemaworld\")\n if filmworld_movies is not None:\n self.get_title_map(filmworld_movies, \"filmworld\")\n\n return self.title_map",
"def effect_list(self):\n moods = []\n for mood in self._moodlist:\n if \"name\" in mood:\n moods.append(mood['name'])\n return moods",
"def get_wrf_stations(pool):\n\n wrfv3_stations = {}\n\n connection = pool.connection()\n try:\n with connection.cursor() as cursor:\n sql_statement = \"SELECT `id`, `name` FROM `station` WHERE `id` like %s\"\n row_count = cursor.execute(sql_statement, \"11_____\")\n if row_count > 0:\n results = cursor.fetchall()\n for dict in results:\n wrfv3_stations[dict.get(\"name\")] = dict.get(\"id\")\n return wrfv3_stations\n else:\n return None\n except Exception as exception:\n error_message = \"Retrieving wrf stations failed\"\n logger.error(error_message)\n traceback.print_exc()\n raise exception\n finally:\n if connection is not None:\n connection.close()",
"def fetch_mines(self, planet=None):\n return self.fetch_levels(\"resources\", planet, codes.mines)",
"def get_all_masses(self):\n allMasses = set()\n for interval in self.mz_tree:\n allMasses.add( interval.data[\"mass\"] )\n\n return allMasses",
"def warping_grp(self, run_idx):\n return self.records_grp(run_idx, WARPING)",
"def get_mke_scores():\n _scores = {k:[] for k in time_str_to_time.keys()} \n _scores['all'] = [] # add key for all milwaukee and all time zones\n for zip_ in zip_populations.keys(): \n res = query_all_crimes(zip_=zip_)\n print(f'[PROCESSING] {zip_}')\n crimes = to_df(res)\n create_crime_cat(crimes)\n integrate_weight_to_df(crimes)\n for time_sl in time_str_to_time.keys():\n sub = extract_crimes_by_sl(crimes, time_str_to_time[time_sl]) \n cas = compute_crime_score(sub, zip_) \n _scores[time_sl].append(cas)\n _scores['all'].append(cas)\n return _scores",
"def query_intrastate_wars(req_war_id):\n\tthis_query = Query('intrastate', req_war_id)\n\tthis_query.send_query()\n\tresponse = this_query.pull_result()\n\treturn jsonify(response)\n\t#return render_template('response.html', response=response)",
"def get_matching_war(self, clan, war=None):\n\n if war and war.date_start <= self.time <= war.date_end:\n return war\n\n try:\n return ClanWar.objects.get(\n clan=clan,\n date_start__lte=self.time,\n date_end__gte=self.time\n )\n except ClanWar.DoesNotExist:\n return None\n except ClanWar.MultipleObjectsReturned:\n return None",
"def get_all_boards():\n return [board for board in GRAPH_DB.find(\"board\")]",
"def mlbweather(self, irc, msg, args, optteam):\n \n optteam = optteam.upper().strip()\n\n if optteam not in self._validteams():\n irc.reply(\"Team not found. Must be one of: %s\" % self._validteams())\n return\n \n url = self._b64decode('aHR0cDovL3d3dy5wYXJrZmFjdG9ycy5jb20v')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n if \"an error occurred while processing this directive\" in html:\n irc.reply(\"Something broke with parkfactors. Check back later.\")\n return\n \n html = html.replace('&','&').replace('ARZ','ARI').replace('CHW','CWS').replace('WAS','WSH').replace('MLW','MIL') # need some mangling.\n\n soup = BeautifulSoup(html)\n h3s = soup.findAll('h3')\n\n object_list = []\n\n for h3 in h3s:\n park = h3.find('span', attrs={'style':'float: left;'})\n factor = h3.find('span', attrs={'style': re.compile('color:.*?')})\n matchup = h3.findNext('h4').find('span', attrs={'style':'float: left;'})\n winddir = h3.findNext('img', attrs={'class':'rose'})\n windspeed = h3.findNext('p', attrs={'class':'windspeed'}).find('span')\n weather = h3.findNext('h5', attrs={'class':'l'})\n if weather.find('img', attrs={'src':'../images/roof.gif'}):\n weather = \"[ROOF] \" + weather.text \n else:\n weather = weather.text.strip()\n\n d = collections.OrderedDict()\n d['park'] = park.renderContents().strip()\n d['factor'] = factor.renderContents().strip()\n d['matchup'] = matchup.renderContents().strip()\n d['winddir'] = str(''.join(i for i in winddir['src'] if i.isdigit()))\n d['windspeed'] = windspeed.renderContents().strip()\n d['weather'] = weather.replace('.Later','. Later').replace('°F','F ')\n object_list.append(d)\n\n output = False \n \n for each in object_list:\n if optteam in each['matchup']:\n output = \"{0} at {1}({2}) Weather: {3} Wind: {4}mph ({5}deg)\".format(ircutils.underline(each['matchup']),\\\n each['park'], each['factor'], each['weather'], each['windspeed'], each['winddir'])\n \n if not output:\n irc.reply(\"No match-up found for: %s\" % optteam)\n return\n else:\n irc.reply(output)",
"def query_extrastate_wars(req_war_id):\n\tthis_query = Query('extrastate', req_war_id)\n\tthis_query.send_query()\n\tresponse = this_query.pull_result()\n\t#response = json.dumps(response)\n\treturn jsonify(response)",
"def list_missions(self):\n\n # getting all the histogram information\n service = \"Mast.Caom.All\"\n params = {}\n response = self.service_request_async(service, params, format='extjs')\n jsonResponse = response[0].json()\n\n # getting the list of missions\n histData = jsonResponse['data']['Tables'][0]['Columns']\n for facet in histData:\n if facet['text'] == \"obs_collection\":\n missionInfo = facet['ExtendedProperties']['histObj']\n missions = list(missionInfo.keys())\n missions.remove('hist')\n return missions",
"def get_cities(self, city_name: str = None):",
"def getMyArmies(self):\n r = []\n for army in self.__armies:\n if (army.getOwner() == 1):\n r.append(army)\n return r",
"def find_all(self, params={}, **options):\n return self.client.get_collection(\"/workspaces\", params, **options)",
"def list_mc_servers(self, by_name=False, all_data=False):\n status, data, errors, messages = self._make_get_request(MCAPIRoutes.LIST)\n \n if status == 200:\n if by_name:\n y = 0\n returnData = dict()\n for items in data['servers']:\n returnData[y] = items.get(\"id\", 0)\n y += 1\n returnData[y] = items.get(\"name\", 0)\n return returnData\n if all_data:\n y = 0\n returnData = dict()\n for items in data['servers']:\n returnData[y] = items.get(\"id\", 0)\n y += 1\n returnData[y] = items.get(\"name\", 0)\n y += 1\n returnData[y] = items.get(\"running\", 0)\n y = y + 1\n returnData[y] = items.get(\"auto_start\", 0)\n return returnData\n del returnData\n else:\n return data['servers']\n elif status == 500:\n self._check_errors(errors, messages)"
] | [
"0.530085",
"0.5216914",
"0.52164996",
"0.52158",
"0.512819",
"0.5046316",
"0.49238253",
"0.48991495",
"0.48724052",
"0.4854176",
"0.48369068",
"0.47993955",
"0.4775364",
"0.47421068",
"0.47291753",
"0.46694636",
"0.4650255",
"0.46441507",
"0.46405992",
"0.46394694",
"0.46272328",
"0.4622484",
"0.4615833",
"0.4612752",
"0.4594808",
"0.45941487",
"0.45882702",
"0.45876667",
"0.4579765",
"0.45696202"
] | 0.69981116 | 0 |
Get a list of all war eras. | def get_war_eras():
return datasources_service.get_war_eras() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ewriters():\n return dict(_ewriters)",
"def get_morbidities_for_war_era():\n war_era_name = request.args.get('warEra')\n if not war_era_name:\n raise BadRequestError(\"warEra parameter is missing\")\n return datasources_service.get_morbidities_for_war_era(war_era_name)",
"def list_shelves(self):\n shelflist = []\n for i in self.get_shelves():\n shelflist.append(i)\n return shelflist",
"def get_resources(self):\n return []",
"def get_all(self):\n\n url = 'equipment/all'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def list_all(self):\n\n url = 'equipamento/list/'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def get_all_ribs_per_router(self):\n return self._get_all_ribs(lambda r: r.name)",
"def list_router(self):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/routers.json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.project_info[\"token_project\"]}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while listing routers.\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List router Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Router List : %s \" % output)\n\n return output[\"routers\"]",
"def get_all_environments():\n return ENVIRONMENTS",
"def all(self) -> list[dict[str, Any]]:\n return self.client.get(self._url())",
"def alerts_all_zones(self: SimpleNWS) -> List[Dict[str, Any]]:\n return self._alerts_all_zones",
"def get_all_explorations():\n return [exp_domain.Exploration(e) for e in\n exp_models.ExplorationModel.get_all()]",
"def get_etfs_list(self):\n return list(self.etfs.keys())",
"def resources(self):\n return list(self.get_resources_for_type(gdef.ResType_All))",
"def all_errata(self):\n return self._all_errata",
"def get_all_reporters():\r\n for ep in iter_entry_points('attest.reporters'):\r\n yield ep.name",
"def get_all_entities(self):\n return Artifact.get_all()",
"def _get_all_resources(self):\n all_resources = []\n for resource in ResourceModel.scan():\n all_resources.append(resource)\n return all_resources",
"def get_laser_echoes(self):\n return self._request_data(\"/lokarria/laser/echoes\")",
"def etls(self):\r\n return self._etls",
"def get_all_thermals(self):\n return self._thermal_list",
"def get_possible_absentees(self) -> List[QualifiedAgent]:\n wum: WorklistUpdateManagerApi = self._service_provider.get_service(WorklistUpdateManagerApi)\n return self._rem_iter_handler.consume(\n wum.get_possible_absentees(),\n \"agents\",\n PossAbsRemoteIteratorApi,\n PossAbsRemoteIteratorApi.poss_abs_get_next,\n )",
"def all_hosts(self):\n ...",
"def list_runs(self):\n postresult = requests.get(\n f\"{self.proto}://{self.host}/ga4gh/wes/v1/runs\", headers=self.auth\n )\n return wes_reponse(postresult)",
"def get_all_routers(self):\n import network\n sta_if = network.WLAN(network.STA_IF)\n sta_if.active(True)\n all_routers = sta_if.scan()\n\n routers = []\n for router_tuple in all_routers:\n router = Router(router_tuple[0], router_tuple[1], router_tuple[3])\n routers.append(router)\n\n return routers",
"def list(self):\n path = \"authSettings/exemptedUrls\"\n return self._session.get(path)",
"def rehearsal_list(request_dict):\n rehearsals = Rehearsal.query.all()\n rehearsals_list = list()\n for rehearsal in rehearsals:\n rehearsals_list.append(rehearsal)\n\n return JSONTools.rehearsal_list_reply(rehearsals_list)",
"def find_all(client):\n return list(map(lambda s: Site(s), client.get_api_resource(\"self/sites\")))",
"def episodes(self):\n episodes = []\n for series in self.series:\n episodes.extend(series.episodes)\n return episodes",
"def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)"
] | [
"0.5828334",
"0.5808068",
"0.57432085",
"0.5644334",
"0.56183976",
"0.5573372",
"0.5563877",
"0.55565184",
"0.55515826",
"0.55351365",
"0.55152845",
"0.55032134",
"0.54578054",
"0.54362935",
"0.54297596",
"0.5429045",
"0.5412836",
"0.54041094",
"0.5400069",
"0.5393652",
"0.5388164",
"0.53848094",
"0.5379429",
"0.53649694",
"0.5347273",
"0.5325162",
"0.53024733",
"0.5293844",
"0.5244459",
"0.52426714"
] | 0.7025474 | 0 |
Cancel a withdraw request. | def post_cancel_withdraw(self, withdraw_id: 'int') -> int:
params = {
"withdraw-id": withdraw_id
}
from huobi.service.wallet.post_cancel_withdraw import PostCancelWithdrawService
return PostCancelWithdrawService(params).request(**self.__kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)"
] | [
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055",
"0.66425055"
] | 0.6858336 | 0 |
Get the withdraw quota for currencies | def get_account_withdraw_quota(self, currency: 'str') -> list:
check_should_not_none(currency, "currency")
params = {
"currency": currency,
}
from huobi.service.wallet.get_account_withdraw_quota import GetAccountWithdrawQuotaService
return GetAccountWithdrawQuotaService(params).request(**self.__kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def options_to_withdraw(self, amount):\n counter = PaperMoneyCounter() # aux class\n options = [] # options to withdraw\n remaining_cash = 0 # aux var\n\n if (amount % 20 == 0 or amount % 50 == 0) and (amount <= 1000): # is it allowed to withdraw?\n # prioritizing 100-dollar bills\n qtt_100s = counter.how_many_100s(amount)\n remaining_cash = counter.remaining_cash_without_100s(amount)\n\n qtt_50s = counter.how_many_50s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_50s(remaining_cash)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 50-dollar bills\n qtt_100s = 0\n\n qtt_50s = counter.how_many_50s(amount)\n remaining_cash = counter.remaining_cash_without_50s(amount)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 20-dollar bills\n qtt_100s = 0\n\n qtt_50s = 0\n\n qtt_20s = counter.how_many_20s(amount)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n if not(options[1] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n return options\n\n return None # if it wasn't allowed to withdraw",
"def quota(self) -> 'outputs.CommitmentQuotaResponse':\n return pulumi.get(self, \"quota\")",
"def get_quota(self):\n raise NotImplementedError",
"def withdraw(account, amount):\n pass",
"def getActiveCurrencies():",
"def get_send_quota(self):\r\n return self._make_request('GetSendQuota')",
"def quota(self) -> int:\n return pulumi.get(self, \"quota\")",
"def getCurrencies():",
"def withdraw(self, currency, amount, address):\n return self.api_query('withdraw', {\"currency\": currency, \"amount\": amount, \"address\": address})",
"def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')",
"def getBaseCurrency():",
"def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total",
"def withdraw_money(c_id, amount):\n return ar.withdraw_money(c_id, amount)",
"def getActiveCurrency():",
"def withdraw(self, currency, amount, address):\n return self.__call__('balance', 'withdrawcurrency',\n {\"currencyname\": currency, \n \"quantity\": amount, \n \"address\": address})",
"def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used",
"def withdraw(self, amount):\n self.transactions += [('withdraw', amount)]\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance",
"def withdraw(holder):\n account = Account.query.filter_by(holder=holder).first()\n amount = request.json.get(\"amount\")\n if not account:\n return jsonify({\"error\": \"Account does not exist\"})\n if account.balance >= amount:\n account.balance -= amount\n db.session.commit()\n return jsonify(\n {\n \"holder\": account.holder,\n \"balance\": account.balance,\n \"message\": \"The withdraw has been processed\",\n }\n )\n return jsonify({\"error\": \"The account balance is insufficient\"})",
"def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance",
"def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance",
"def getUserCurrency():",
"def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance",
"def deposits_limit(self):\n limits = self.user.limits\n value = 0\n if limits.exists():\n value = self.user.limits.get(type=Limit.DEPOSIT).value\n return value",
"def get_fiat_balance():\n return get_balance(CONF.quote)",
"def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount",
"def amount_to_pay_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_not_used = 30 * self.frequency - (date.today() - period_start).days\n return int(price_per_day * days_not_used)",
"def get_quota(self):\n path = 'urlCategories/urlQuota'\n return self._session.get(path)",
"def balance(self) -> Decimal:\n withdrawals = self.withdrawal_requests.filter(\n status=WithdrawalStatus.open,\n )\n if len(withdrawals) == 0:\n return self.internal_balance\n else:\n withdrawal_total = sum(map(lambda w: w.amount, withdrawals))\n return self.internal_balance - withdrawal_total",
"def withdraw(self, currency, amount, address):\n pass",
"def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r"
] | [
"0.63382167",
"0.62305385",
"0.6179834",
"0.60655946",
"0.5956213",
"0.5947808",
"0.59429044",
"0.58825815",
"0.58773303",
"0.5864017",
"0.58567953",
"0.58432806",
"0.5827308",
"0.5814937",
"0.57675916",
"0.57590467",
"0.5756222",
"0.57123595",
"0.5685044",
"0.5678759",
"0.5667374",
"0.5618233",
"0.56103253",
"0.56043047",
"0.55661595",
"0.5557036",
"0.5556752",
"0.55559623",
"0.5546103",
"0.55141354"
] | 0.6826426 | 0 |
Parent gets sub user deposit history. | def get_sub_user_deposit_history(self, sub_uid: 'int', currency: 'str' = None,
start_time: 'int' = None, end_time: 'int' = None,
sort: 'str' = None, limit: 'int' = None, from_id: 'int' = None) -> DepositHistory:
check_should_not_none(sub_uid, "sub_uid")
params = {
"subUid": sub_uid,
"currency": currency,
"startTime": start_time,
"endTime": end_time,
"sort": sort,
"limit": limit,
"fromId": from_id
}
from huobi.service.wallet.get_sub_user_deposit_history import GetSubUserDepositHistoryService
return GetSubUserDepositHistoryService(params).request(**self.__kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def last_history(self, user):\n return History.objects(user=user).order_by('-created_at').first()",
"def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"",
"def getUsernameHistory(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}/username-history\"\n r = requests.get(url)\n j = json.loads(r.text)\n data = j['data']\n return data",
"def show_history(user_id):\n return History.where('user_id', user_id).get()",
"def get_user_purchase_history_admin(user_name, other_user_name):\n\n user_name = auth.get_username_from_hash(user_name)\n # user_handler.is_permitted_to_do(user_name, None, 1 << Action.USER_PURCHASE_HISTORY.value)\n # check if admin\n return purchase_handler.get_user_purchase_history(other_user_name)",
"def show_history(self, user: TelegramController.User):\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()[0][1]\n return data",
"def get_user_history (history_id=None):\n history_id = history_id or os.environ['HISTORY_ID']\n gi = get_galaxy_connection(history_id=history_id, obj=False)\n hc = HistoryClient(gi)\n history = hc.show_history(history_id, visible=True, contents=True)\n return history",
"def history():\n user_history=db.execute(\"SELECT * FROM history WHERE user_id=:u_i\",u_i=session[\"user_id\"])\n return render_template(\"history.html\",s=user_history)",
"def get_history(self):\n return self.history",
"def history():",
"def History(self):\n return self.historydict.get('history', [])",
"def history():\n transactions = db.execute(\"SELECT * FROM history WHERE user_id = ?\", session[\"user_id\"])\n user_name = db.execute(\"SELECT username, cash FROM users WHERE id = ?\", session[\"user_id\"])\n \n return render_template(\"history.html\", transactions=transactions, user_name=user_name[0][\"username\"])",
"def history(self):\n return self.info['history']",
"def history():\n\n # Access user's id\n user_id = session[\"user_id\"]\n\n # Obtain history information for logged in user\n TRANSACTIONS = db.execute(\"SELECT * FROM history WHERE user_id = ? ORDER BY transacted DESC\", user_id)\n\n return render_template(\"history.html\", transactions=TRANSACTIONS)",
"def QueryHistory(self):\n return []",
"def account_df_history(self, improve=False):\n return(self.account_df('history', improve))",
"def do_gethistory(self,args):\n #Very rough. pretty print it\n history=bitstamp.get_usertransactions()\n ppdict(history)",
"def history():\n\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user ORDER BY date\",\n user=user)\n\n return render_template(\"history.html\", stocks = owned)",
"def history():\n \n user_id = session[\"user_id\"]\n history_list = hist(user_id, db)\n return render_template('history.html', history=history_list)",
"def history():\n query = Records.query.filter_by(user_id=session.get(\"user_id\")).all()\n return render_template(\"history.html\", rows=query)",
"def history():\n \n value_dicts = db.execute(\"SELECT * FROM history WHERE user_id = :usid\", usid=session[\"user_id\"])\n return render_template(\"history.html\", value_dicts=value_dicts)",
"def get_history(self):\n return self.__history[:]",
"def get_user_purchases_history(user_name):\n user_name = auth.get_username_from_hash(user_name)\n return purchase_handler.get_user_purchase_history(user_name)\n # return user_handler.get_user_purchase_history(user_name)",
"def history():\n\n # get all transactions for current user\n transactions = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n\n # render history.html with all user transactions\n return render_template(\"history.html\", transactions=transactions, usd=usd)",
"def history():\n # name variable to show current users name in template\n name = db.execute(\"SELECT username FROM users WHERE id=:id\", id=session[\"user_id\"])\n\n # user's transaction history\n hist = db.execute(\"SELECT transactid, name, price, quantity, date FROM portfolio WHERE userid = :userid\", userid=session[\"user_id\"])\n\n # return the template with the relevant objects for jinja\n return render_template(\"history.html\", name=name, hist=hist)\n\n # if function fails\n return apology(\"Can't display history\", 400)",
"def history():\n\n rows = db.execute(\"SELECT * FROM 'transaction' WHERE u_id = :user_id\", user_id = session[\"user_id\"])\n return render_template(\"history.html\", rows = rows)",
"def history():\n\n user_id = session.get('user_id')\n table_name = f'stocks_user{user_id}'\n rows = db.execute(\"SELECT * FROM ?\", table_name)\n\n return render_template('history.html', rows=rows)",
"def orders_history(self): \n return(self._d_orders['history'])",
"def history(self):\n return _uhd_swig.usrp_sink_sptr_history(self)",
"async def history(self, ctx, user_id: str):\n\n session = self.bot.helpers.get_db_session()\n try:\n self.bot.log.info(\n f\"CMD {ctx.command} called by {ctx.message.author} ({ctx.message.author.id})\"\n )\n guild = ctx.message.guild\n user = await self.bot.helpers.get_member_or_user(user_id, guild)\n if not user:\n return await ctx.send(\n f\"Unable to find the requested user. Please make sure the user ID or @ mention is valid.\"\n )\n\n (\n embed_result_entries,\n footer_text,\n ) = await self.bot.helpers.get_action_history(session, user, guild)\n\n p = FieldPages(ctx, per_page=8, entries=embed_result_entries,)\n p.embed.color = 0xFF8C00\n p.embed.set_author(\n name=f\"Member: {user} ({user.id})\", icon_url=user.avatar_url\n )\n p.embed.set_footer(text=footer_text)\n await p.paginate()\n except discord.HTTPException as err:\n self.bot.log.exception(\n f\"Discord HTTP Error responding to {ctx.command} request via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n except DBAPIError as err:\n self.bot.log.exception(\n f\"Error logging note to database. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n session.rollback()\n except Exception as err:\n self.bot.log.exception(\n f\"Error responding to {ctx.command} via Msg ID {ctx.message.id}. {sys.exc_info()[0].__name__}: {err}\"\n )\n await ctx.send(\n f\"Error processing {ctx.command}. Error has already been reported to my developers.\"\n )\n finally:\n session.close()"
] | [
"0.61068934",
"0.6100039",
"0.60115993",
"0.59361994",
"0.59344065",
"0.58696795",
"0.5844484",
"0.5742806",
"0.571162",
"0.5664033",
"0.56583655",
"0.56562835",
"0.5651735",
"0.56295484",
"0.5605254",
"0.5565101",
"0.55631995",
"0.55622804",
"0.55606115",
"0.54985034",
"0.549455",
"0.5494534",
"0.5458319",
"0.54354095",
"0.54311186",
"0.5425557",
"0.54193866",
"0.54190576",
"0.5417586",
"0.5395642"
] | 0.61268777 | 0 |
Add an obstacle to the map | def add_obstacle(self, obstacle_to_add):
if self.obstacles.size != 0:
self.obstacles = np.hstack((self.obstacles, obstacle_to_add))
else:
self.obstacles = np.array([obstacle_to_add]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1",
"def add_obstacle(self, x, y):\n self.BOARD[y][x].traversable = False\n self.board_array[y][x] = 1",
"def add_obstacle(self, *points: Tuple[float, float]):\n self.obstacles.append(o.Obstacle(*points))",
"def set_obstacle(self, pos: tuple):\n if self.within_map(pos):\n self.map[round(pos[0]), round(pos[1])] = OBSTACLE\n return True\n else:\n return False",
"def update_obstacle_location(self):\n\n # find the previous location of the obstacle\n old_y = self.map_obstacle.y\n old_x = self.map_obstacle.x\n\n # remove it from the main graph\n self.main_graph[old_y][old_x].contents.remove(self.map_obstacle)\n\n # get the latest location\n self.map_obstacle.update_location()\n (new_y, new_x) = (self.map_obstacle.y, self.map_obstacle.x)\n\n # add it back into the main graph\n self.main_graph[new_y][new_x].contents.add(self.map_obstacle)\n\n # update the map obstacle (not necessary, but it doesn't hurt)\n self.map_obstacle.y = new_y\n self.map_obstacle.x = new_x",
"def update_obstacles(self, new_obs):\n self.obstacles = new_obs",
"def __init__(self, map_obstacle, main_graph):\n\n self.map_obstacle = map_obstacle\n self.main_graph = main_graph\n\n self.sight_range = self.calculate_sight_range()\n\n self.top_left_y = None\n self.top_left_x = None\n self.bottom_right_y = None\n self.bottom_right_x = None\n self.height = None\n self.width = None\n self.size = self.calculate_size()\n\n # nodes specific to this threat zone\n self.nodes = []",
"def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)",
"def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects",
"def add_neighbors(self, pos, distance, obstacles):\n \n neighbor_list = [(pos[0]-1,pos[1]), (pos[0]+1,pos[1]), \\\n (pos[0],pos[1]-1), (pos[0], pos[1]+1)]\n # Processing each neighbor.\n for (x,y) in neighbor_list:\n if x>=0 and y>=0 and x<self.M and y<self.N: # Out from boundary?\n if (x,y) not in obstacles:\n if (x,y) not in self.footprint: # Already in done list?\n new_distance = distance + 1 + self.heuristic_map[x,y]\n if (x,y) not in self.frontier.keys(): # A new candidate to add to frontier set.\n self.frontier.update({(x,y):new_distance})\n self.distance_map[x,y] = distance + 1\n self.camefrom_map[(x,y)] = pos\n elif new_distance < self.frontier[(x,y)]: # A short path reached this neighbor.\n self.frontier[(x,y)] = new_distance\n self.distance_map[x,y] = distance + 1\n self.camefrom_map[(x,y)] = pos",
"def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n \r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst",
"def draw_obstacles():\n for obstacle in obstacles:\n plt.gca().add_patch(obstacle)",
"def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles",
"def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)",
"def draw_obstacle(start, end, img):\n # start, end, top_right, top_left = generate_obstacle_point(start, (start[0] + _OBSTACLE_SIZE, start[1] ))\n cv2.fillPoly(img, np.array([[[start[0] - 25, start[1] - 25],\n [start[0] + 25, start[1] - 25],\n [start[0] + 25, start[1] + 25],\n [start[0] - 25, start[1] + 25]]]), _RED)\n # cv2.rectangle(img, (start[0] - 25, start[1] - 25), (start[0] + 25, start[1] + 25), (0, 255, 0), 3)\n return img",
"def is_obstacle(self, pos: tuple):\n if self.within_map(pos):\n return self.map[round(pos[0]), round(pos[1])] == OBSTACLE\n else:\n return False",
"def publishObstacles(self):\n mk = Marker()\n mk.header.stamp = rospy.get_rostime()\n mk.header.frame_id = '/base_link'\n\n mk.ns='basic_shapes'\n mk.id = 0\n mk.type = Marker.POINTS\n mk.scale.x = 0.3\n mk.scale.y = 0.3\n mk.scale.z = 0.3\n mk.color.r = 1.0\n mk.color.a = 1.0\n\n for value in self.obstacle_map.obstacles_in_memory:\n p = Point()\n p.x = value[0]\n p.y = value[1]\n mk.points.append(p)\n\n\n self.obs_pub.publish(mk)",
"def __init__(self, map_config):\n self.current_obstacles = []\n self.current_goal = None\n self.cfg = map_config",
"def updateObstacleMap(self):\n\n all_sensor_readings = self.laser_readings + self.sonar_readings\n\n #we remove all the sensor readings that occur inside the robot frame\n restricted_sensor_readings = []\n for pt in all_sensor_readings:\n if not self.obstacle_map.inRobot(pt):\n restricted_sensor_readings.append(pt)\n\n #add the obstacles to the obstacle map\n self.obstacle_map_lock.acquire()\n self.obstacle_map.addObstacles(restricted_sensor_readings)\n self.obstacle_map_lock.release()\n\n return",
"def process_obstacle(color, cx, cy, box, x, y, obj_length, obj_height, obj_depth,\n\t\t\t\t\t equi_diameter, obstacle_list, obstacle_lifetime, obstacle_id, visualize, send_data):\n\tcoords = list(depth_to_point_cloud_pos(cx, cy, obj_depth)) # convert obstacle depth to XYZ coordinate\n\n\t#theta = CameraPosition['azimuth'] * math.pi / 180 # get robot pitch angle in radians\n\t#coords[0] = CameraPosition['x'] - coords[0] * math.cos(theta) # convert relative obstacle position to global\n\t#coords[2] = CameraPosition['y'] + coords[2] * math.sin(theta)\n\tmm_diameter = equi_diameter * (1.0 / CameraParams['fx']) * obj_depth # convert pixel diameter to mm\n\n\tif 100 < mm_diameter < 400:\n\t\tnew_obstacle = True\n\t\tcurrent_obstacle = None\n\t\tfor obstacle in obstacle_list:\n\t\t\tx_match = abs(obstacle.x - coords[0]) < 0.3\n\t\t\ty_match = abs(obstacle.y - coords[2]) < 0.3\n\t\t\tz_match = abs(obstacle.z - coords[1]) < 0.5\n\t\t\tdiameter_match = abs(obstacle.diameter - mm_diameter) / 1000. < 0.5\n\t\t\tif x_match and y_match:\n\t\t\t\tobstacle.x = coords[0]\n\t\t\t\tobstacle.y = coords[2]\n\t\t\t\tobstacle.z = coords[1]\n\t\t\t\tobstacle.diameter = mm_diameter / 1000.\n\t\t\t\tnew_obstacle = False\n\t\t\t\tobstacle.lifetime = obstacle_lifetime\n\t\t\t\tif send_data:\n\t\t\t\t\tsend_obstacle_data(obstacle)\n\t\t\t\tcurrent_obstacle = Obstacle(obstacle.id,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.x,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.y,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.z,\n\t\t\t\t\t\t\t\t\t\t\tobstacle.diameter,\n\t\t\t\t\t\t\t\t\t\t\tobstacle_lifetime)\n\t\t\t\tif obstacle.lifetime == 0:\n\t\t\t\t\tobstacle_list.remove(obstacle)\n\t\t\t\tbreak\n\t\tif new_obstacle:\n\t\t\tcurrent_obstacle = Obstacle(obstacle_id,\n\t\t\t\t\t\t\t\t\t\tcoords[0],\n\t\t\t\t\t\t\t\t\t\tcoords[2],\n\t\t\t\t\t\t\t\t\t\tcoords[1],\n\t\t\t\t\t\t\t\t\t\tmm_diameter / 1000.,\n\t\t\t\t\t\t\t\t\t\tobstacle_lifetime)\n\t\t\tobstacle_id += 1\n\t\t\tif send_data:\n\t\t\t\tsend_obstacle_data(current_obstacle)\n\t\t\tobstacle_list.append(current_obstacle)\n\n\t\tif visualize:\n\t\t\t# begin visualization\n\t\t\tcv2.drawContours(color, [box], 0, (0, 0, 255), 1)\n\t\t\tcv2.rectangle(color, (x, y), (x + obj_length, y + obj_height), (0, 255, 0), 2)\n\t\t\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\t\t\tcv2.putText(color, 'id = %d' % current_obstacle.id, (cx, cy + 15), font, 0.4, (255, 0, 255),\n\t\t\t\t\t\t1, cv2.LINE_AA)\n\t\t\tcv2.putText(color, \"x = %.2f\" % coords[0], (cx, cy + 30), font, 0.4, (0, 0, 255), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\t\t\tcv2.putText(color, \"y = %.2f\" % coords[2], (cx, cy + 45), font, 0.4, (0, 255, 0), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\t\t\tcv2.putText(color, \"z = %.2f\" % (obj_depth / 1000), (cx, cy + 60), font, 0.4, (255, 0, 127),\n\t\t\t\t\t\t1, cv2.LINE_AA)\n\t\t\tcv2.putText(color, \"diameter = %.2f\" % (mm_diameter / 1000), (cx, cy + 75), font, 0.4,\n\t\t\t\t\t\t(255, 127, 0), 1,\n\t\t\t\t\t\tcv2.LINE_AA)\n\treturn obstacle_id",
"def spawn_obstacles(self):\n self.obstacle_sprites.empty()\n number_of_obstacles = random.randint(MIN_OBSTACLES, MAX_OBSTACLES)\n while len(self.obstacle_sprites) < number_of_obstacles:\n obstacle = Obstacle(random.randrange(0, WIDTH), random.randrange(HEIGHT - 500, HEIGHT))\n obstacle_collision = pygame.sprite.spritecollide(obstacle, self.obstacle_sprites, False)\n if not obstacle_collision:\n self.obstacle_sprites.add(obstacle)",
"def __init__(self, costmap):\n # Copy the map metadata\n self.resolution = costmap.info.resolution\n self.min_x = costmap.info.origin.position.x\n self.min_y = costmap.info.origin.position.y\n self.y_width = costmap.info.height\n self.x_width = costmap.info.width\n self.max_x = self.min_x + self.x_width *self.resolution\n self.max_y = self.min_y + self.y_width *self.resolution\n print self.min_x, self.min_y\n print self.max_x, self.max_y\n print \"Resolution: \", self.resolution\n print self.x_width, self.y_width\n \n\n self.motion = self.get_motion_model()\n \n # Copy the actual map data from the map\n x = 0\n y = 0\n ox = list()\n oy = list()\n # obstacle map generation\n self.obstacle_map = [[False for _ in range(self.y_width)]\n for _ in range(self.x_width)]\n obstacles = 0\n for value in costmap.data:\n if value >95:\n obstacles += 1\n self.obstacle_map[x][y] = True\n ox.append(float(x)*self.resolution +self.min_x)\n oy.append(float(y)*self.resolution +self.min_y)\n # Update the iterators\n x += 1\n if x == self.x_width:\n x = 0\n y += 1\n print \"Loaded %d obstacles\"%(obstacles)\n if show_animation: # pragma: no cover\n plt.plot(ox, oy, \".k\")\n plt.grid(True)\n \n # plt.axis(\"equal\")",
"def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)",
"def GroundExcelAddObstacleLevel(builder, ObstacleLevel):\n return AddObstacleLevel(builder, ObstacleLevel)",
"def updateObstacles(self, obstacles):\r\n global_obs = self.calcGlobalObstaclePosition(obstacles)\r\n self.globalObstaclesList.extend(global_obs)",
"def load_from_info(self, course_info):\n for item in course_info[\"obstacles\"]:\n klass = self.class_map[item[0].lower()]\n midbottom = item[1]\n obstacle = klass(midbottom, self.obstacles)\n if \"gate\" in item[0].lower():\n self.gates.add(obstacle)",
"def add_new_goal(self):\n while True:\n goal = self.__generate_new_goal()\n intersects = self.__check_obstacle_intersections(goal)\n if not intersects:\n self.current_goal = goal\n break",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def request_move(self, map_object, x, y):\n self.moveRequests.append([map_object, x, y])",
"def move(self, fruit, corner):\n\n if not check_if_inside(self.x, self.y, corner):\n self.state = 'dead'\n self.fitness = max(1, self.fitness - 5)\n\n if self.size > 4:\n for i in range(self.size - 1):\n if (self.x, self.y) == self.tail[-(i + 2)]:\n self.state = 'dead'\n self.fitness = max(1, self.fitness - 5)\n\n if self.state == 'alive':\n\n location = (self.x, self.y)\n self.tail.append(location)\n self.tail.pop(0)\n\n data = []\n\n distance = estimate_distance(self, fruit)\n angle = estimate_angle(self, fruit)\n\n x_direction_left = round(self.direction_x * math.cos(angle) - self.direction_y * math.sin(angle))\n y_direction_left = round(self.direction_x * math.sin(angle) + self.direction_y * math.cos(angle))\n\n x_direction_right = round(self.direction_x * math.cos(angle) + self.direction_y * math.sin(angle))\n y_direction_right = round(-self.direction_x * math.sin(angle) + self.direction_y * math.cos(angle))\n\n if not check_if_inside(self.x + x_direction_left, self.y + y_direction_left, corner):\n obstacle_to_left = 1\n else:\n obstacle_to_left = 0\n\n if not check_if_inside(self.x + x_direction_right, self.y + y_direction_right, corner):\n obstacle_to_right = 1\n else:\n obstacle_to_right = 0\n\n if not check_if_inside(self.x + self.direction_x, self.y + self.direction_y, corner):\n obstacle_ahead = 1\n else:\n obstacle_ahead = 0\n\n data.append(distance)\n data.append(angle)\n data.append(obstacle_ahead)\n data.append(obstacle_to_left)\n data.append(obstacle_to_right)\n\n self.output = self.predict(data)\n\n if np.argmax(self.output) == 0:\n self.direction_x = x_direction_left\n self.direction_y = y_direction_left\n elif np.argmax(self.output) == 1:\n self.direction_x = x_direction_right\n self.direction_y = y_direction_right\n\n self.x = self.x + self.direction_x\n self.y = self.y + self.direction_y\n\n distance_after = estimate_distance(self, fruit)\n\n # if distance_after < distance:\n # self.fitness += 6\n # else:\n # self.fitness = max(1, self.fitness - 7.5)"
] | [
"0.75989723",
"0.75989723",
"0.7295586",
"0.7156997",
"0.6637772",
"0.65937614",
"0.6447093",
"0.6407341",
"0.6341093",
"0.6300385",
"0.6247986",
"0.6215138",
"0.61860317",
"0.60853684",
"0.6058344",
"0.6036493",
"0.60249126",
"0.60026395",
"0.5975957",
"0.59466404",
"0.59013474",
"0.5880715",
"0.5863128",
"0.58377594",
"0.58337843",
"0.56859237",
"0.5683584",
"0.5676506",
"0.56450784",
"0.5617775"
] | 0.78030485 | 0 |
Add a waypoint to the drone | def add_waypoint(self, waypoint):
self.drone.add_waypoint(waypoint) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def waypoint_add_rel(self):\n pass",
"def waypoint_add_global(self):\n pass",
"def create_waypoint(self, waypoint):\n connection = self.__create_connection()\n try:\n waypoint_list = list(waypoint)\n key = self.__compound_key(waypoint)\n waypoint_list.insert(0, key)\n\n keyed_waypoint = tuple(waypoint_list)\n\n sql = ''' INSERT INTO waypoints(waypoint_id, x, y, z, distance, heading, visit_count)\n VALUES(?,?,?,?,?,?,?) '''\n cur = connection.cursor()\n cur.execute(sql, keyed_waypoint)\n connection.commit()\n cur.close()\n return\n except sqlite3.Error as e:\n print(e)\n finally:\n connection.close()",
"def setPath(self, request, context):\n \n cmds = self.vehicle.commands\n coordFrame, alt = None, None\n waypoints = []\n \n # The idea behind stripping off the first position is to determine what reference frame to\n # to use. Future proto changes will removed the coordinate frame boolean flag from the \n # request making the code unnecessary. For now, this is the way it is.\n firstPosition = nth(request, 0)\n lat = firstPosition.lat\n lon = firstPosition.lon\n \n useRelativeAltitude = firstPosition.useRelativeAltitude\n \n if useRelativeAltitude:\n alt = firstPosition.relativeAltitude\n coordFrame = mavutil.mavlink.MAV_FRAME_GLOBAL_RELATIVE_ALT\n else:\n alt = firstPosition.gpsAltitude\n coordFrame = mavutil.mavlink.MAV_FRAME_GLOBAL\n\n print ('First position at ({0},{1}) -> {2}'.format(lat, lon, alt))\n waypoints.append([lat, lon, alt])\n nextIndex = self.vehicle.commands.next\n # Make sure the drone is not in AUTO mode. \n #self.vehicle.mode = VehicleMode(\"LOITER\")\n self.clear_mission(cmds, coordFrame)\n \n # Add first position\n cmds.add(Command( 0, 0, 0, coordFrame, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, lat, lon, alt))\n \n # Add the remaining positions\n for position in request:\n lat = position.lat\n lon = position.lon\n if useRelativeAltitude:\n alt = position.relativeAltitude\n else:\n alt = position.gpsAltitude\n print ('Point at ({0},{1}) -> {2}'.format(lat, lon, alt))\n cmds.add(Command( 0, 0, 0, coordFrame, mavutil.mavlink.MAV_CMD_NAV_WAYPOINT, 0, 0, 0, 0, 0, 0, lat, lon, alt))\n waypoints.append([lat, lon, alt])\n \n print (\"Uploading new commands to drone\")\n cmds.upload()\n \n # Reset mission set to first (0) waypoint\n #if self.vehicle.commands.next !=0:\n # print \"Continuing mission...\"\n #else:\n # print \"Starting mission\"\n # self.vehicle.commands.next = 0\n if len(self.vehicle.waypoints)==0:\n print \"Starting mission\"\n self.vehicle.commands.next = 0\n else:\n print \"Continuing mission...\"\n self.vehicle.commands.next = nextIndex\n \n self.vehicle.waypoints = waypoints \n self.vehicle.mode = VehicleMode(\"AUTO\")\n \n self.print_mission() \n \n return droneconnect_pb2.Null()",
"def waypoint_callback(self, wp):\n if self.trajectory_constructed == False: \n NextwpPosition = np.array([wp.position.x, wp.position.y, wp.position.z])\n NextwpOrientation = np.array([wp.orientation.x, wp.orientation.y, wp.orientation.z, wp.orientation.w])\n self.pc_x, self.pc_y, self.pc_z, self.seg_times, self.traj_t0 = self.make_trajectory(NextwpPosition, NextwpOrientation) \n self.trajectory_constructed = True",
"def addDrone(self, myDrone):\n self.drones.append(myDrone)",
"def getNextWaypoint(self, request, context):\n\n waypointNumber = self.vehicle.commands.next -1\n missionlist = self.vehicle.waypoints\n if len(missionlist)==0:\n waypointNumber = -1\n dronePosition = droneconnect_pb2.Position(lat = float(0),\n lon = float(0),\n gpsAltitude = float(0))\n else:\n waypoint = missionlist[waypointNumber]\n dronePosition = droneconnect_pb2.Position(lat = float(waypoint[0]),\n lon = float(waypoint[1]),\n gpsAltitude = float(waypoint[2]))\n \n return droneconnect_pb2.IndexedPosition(position = dronePosition, index = waypointNumber)",
"def onAddButtonPress(self, button):\n\t\twp_x = float(self.traj_to_x_entry.get_text())\n\t\twp_y = float(self.traj_to_y_entry.get_text())\n\t\twp_z = float(self.traj_to_z_entry.get_text())\n\t\twp_yaw = float(self.traj_to_yaw_entry.get_text())\n\n\t\t# add waypoint to list\n\t\twaypoints_gui.append([wp_x, wp_y, wp_z, wp_yaw])\n\n\t\t# reset entry fields\n\t\tself.traj_to_x_entry.set_text('')\n\t\tself.traj_to_y_entry.set_text('')\n\t\tself.traj_to_z_entry.set_text('')\n\t\tself.traj_to_yaw_entry.set_text('')",
"def add_travel_direction(self, direction, node):\n self.travel_directions[direction] = node",
"def add_route(self, distance, start, destination):\r\n self.edges[start].append(Edge(distance, start, destination))\r\n self.edges[destination].append(Edge(distance, destination, start))",
"def move_to(self, waypoint):\n self.set_final_wp(waypoint)\n self.go()\n currPos = np.asarray(self.rexarm.get_positions())\n while(np.linalg.norm(np.asarray(waypoint) - currPos) > 0.15):\n time.sleep(0.01)",
"def write_waypoint(self, latitude=None, longitude=None, description=None):\n\n if not description:\n description = ''\n\n latitude = self.format_latitude(latitude)\n longitude = self.format_longitude(longitude)\n\n self.write_config(\n 'ADDWP', '%s,%s,%s' % (latitude, longitude, description[0:50])\n )",
"def add_destination(self):\n pass",
"def publish_waypoints(self):\n\n # Make a lane message\n lane = Lane()\n\n # Get closest waypoint index\n closest_idx = self.get_closest_waypoint_idx()\n\n # Get farthest waypoint index\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n\n # Slice to get the upcoming waypoints\n upcoming_waypoints = self.waypoints.waypoints[closest_idx:farthest_idx]\n\n # If no stopline detected or stopline is beyond farthest index...\n if (self.stopline_waypoint_idx == -1) or (self.stopline_waypoint_idx >= farthest_idx):\n\n # Follow the upcoming waypoints\n lane.waypoints = upcoming_waypoints\n\n else:\n\n # Create a list to hold modified upcoming waypoints\n temp = []\n\n # Find the relative stopline index within the upcoming waypoints\n # Back off by two waypoints so that front of car stays behind\n # stopline.\n stop_idx = max(self.stopline_waypoint_idx-closest_idx-2, 0)\n\n # Get the deceleration velocities at each upcoming waypoint\n velocities = self.deceleration_velocities(upcoming_waypoints, stop_idx)\n\n # For each upcoming waypoint...\n for i, wp in enumerate(upcoming_waypoints[:-1]):\n\n # Create a new waypoint\n p = Waypoint()\n\n # Dupicate the pose of the existing waypoint\n p.pose = wp.pose\n\n # Limit current velocities to decelration velocities\n p.twist.twist.linear.x = min(velocities[i], p.twist.twist.linear.x)\n\n # Add the modified waypoint to the list\n temp.append(p)\n\n # Follow the modified upcoming waypoints\n lane.waypoints = temp\n\n # Publish the lane message\n self.final_waypoints_pub.publish(lane)",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n ...",
"def waypoint_callback(self,msg):\n self.waypoint_loc = msg.data",
"def __init__(self):\n super().__init__()\n self.waypoint_vector = [-1, 10]",
"def add_point(self, pt):\n self.points.append(pt)",
"def sendWaypoints(self,waypoints):\n data = _RobotCommunicator.WAYPOINT_HEADER\n for waypoint in waypoints:\n x,y = waypoint\n data = data + pack(_RobotCommunicator.WAYPOINT_FORMAT,x,y)\n self.udpSock.sendto(data,self.addr)",
"def _add_point(self):\r\n self.command_stack.do(model.structure.AddPoint(self._structure, self._point_index+1, 0, 0))",
"def store_waypoint(self, msg: PoseStamped) -> None:\n rospy.logdebug(\"Received waypoint %s\" % str(msg.pose.position))\n self._waypoints.put(msg)",
"def __init__(self, nav,\n waypoint=ll.LatLon(50.742810, 1.014469), # somewhere in the solent\n target_radius=2, waypoint_id=None,\n ):\n self.nav = nav\n self.waypoint = waypoint\n self.waypoint_id = waypoint_id\n x, y = self.nav.latlon_to_utm(waypoint.lat.decimal_degree, waypoint.lon.decimal_degree)\n self.waypoint_xy = Point(x, y)\n self.target_area = self.waypoint_xy.buffer(target_radius)",
"def addPoint(self, point):\n self.points.append(point)",
"def add(self, point):\n self.points.append(point)",
"def to_waypoint_message(self):\n\n #**********************************************************************\n # Fill in frame and position\n #**********************************************************************\n wp = msg.Waypoint()\n wp.frame = msg.Waypoint.FRAME_GLOBAL\n wp.x = self.latitude\n wp.y = self.longitude\n wp.z = self.altitude\n\n #**********************************************************************\n # Set other attributes to safe defaults. Worst case, if this\n # waypoint was used unchanged to control drone, you'd expected to\n # wait at this waypoint forever (because its effectively unreachable\n # within 0 radius.\n #**********************************************************************\n wp.autocontinue = False\n wp.radius = 0.0\n wp.waitTime = rospy.Duration(secs=0.0)\n return wp",
"def set_destination(self, start_waypoint, end_waypoint, time=False):\n\n self.create_samples(start_waypoint, end_waypoint)\n\n route_trace = self._trace_route(time=time)\n assert route_trace\n\n self._local_planner.set_global_plan(route_trace)",
"def get_gpx_waypoint(self, route, line_location, start_time):\n\n lng, lat = self.get_coords()\n time = start_time + route.get_time_data(line_location, \"schedule\")\n altitude_on_route = route.get_distance_data(line_location, \"altitude\")\n\n return GPXWaypoint(\n name=self.name,\n longitude=lng,\n latitude=lat,\n elevation=altitude_on_route,\n type=self.place_type.name,\n time=time,\n )",
"def addPoint(self, *args, **kwargs):\n ...",
"def add_move(self, direction, priority=False, when_finished=None):\n if priority:\n self.path = [(direction, when_finished)] + self.path\n else:\n self.path.append((direction, when_finished))",
"def append(self,d,p):\n if d.size != 3:\n raise Exception(\"New direction not 3-vector\")\n if p.size != 3:\n raise Exception(\"New point not 3-vector\")\n self.dirlist.append(d)\n self.pointlist.append(p)"
] | [
"0.75902",
"0.7147039",
"0.6792248",
"0.62503475",
"0.6224604",
"0.6080674",
"0.60524154",
"0.6044039",
"0.603555",
"0.60317796",
"0.5964286",
"0.5918597",
"0.58790517",
"0.5873767",
"0.58673775",
"0.5866497",
"0.585557",
"0.5817661",
"0.58173054",
"0.57790995",
"0.5753916",
"0.5748289",
"0.57431036",
"0.57396597",
"0.57374954",
"0.57053655",
"0.5667055",
"0.56641895",
"0.5646554",
"0.5628921"
] | 0.8948917 | 0 |
Set the drone's location in the map | def set_drone_position(self, new_point):
self.drone.set_drone_position(new_point) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def m_location_set(self, x: int, y: int):\n pass",
"def set_location(self, location_set):",
"def set_location(self, lat, long):\n self._data['loc'] = [lat, long]",
"def location(self, value: 'Point'):\n self.geometry.location = value",
"def set_location(self, location):\n self.location = location",
"def set_coordinate(self):\n airqual_dictionary = self.realtime_data['stations'][0] #get the very first(recent) data/result\n self.latitude = airqual_dictionary['lat']\n self.longitude = airqual_dictionary['lng']",
"def set_location(self, x, y):\n self.scene.set_location(x, y)\n self.redraw()",
"def set_new_location(self, xPos, yPos):",
"def set_location(self, client, latitude, longitude):\r\n client.setLocation(latitude, longitude)\r\n return True",
"def set_loc(self, loc):\n self.loc = loc",
"def set_location(self, location):\r\n self.__location = location",
"def set_location(self, location):\n self.location = location",
"def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY",
"def set_coordinates(self):\n client = Client(api_key=settings.YANDEX_GEOCODER_KEY)\n address = f'Россия, {self.state}, {self.city}, {self.street}'\n self.longitude, self.latitude = client.coordinates(address)",
"def set_location(self, location: tuple) -> None:\n self.__location = location",
"def set_coord(self, longitude, latitude):\r\n self.longitude = longitude\r\n self.latitude = latitude\r\n self._grid_proj = _get_projection(self._grid_proj, self.longitude, self.latitude)",
"def set_node(self, index, node):\r\n self.loc.coord[index] = node",
"def set_coordinates(self, x, y):\n self.x = x\n self.y = y",
"def configure_location(self):\n # Set floor correctly\n self.floor.set(\"pos\", array_to_string(self.bottom_pos))",
"def _set_location(self):\n # Get selected text\n self.locnaam = self.location_combobox.currentText()\n\n self._set_data()",
"def set_position(self, lat, lng, h):\n self.lat = lat\n self.lng = lng\n self.h = h",
"def setPoint(self, point):\n self.position = point.position",
"def set_home_position(self, lat, lon, alt):\n pass",
"def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y",
"def drawlocation(self, Type, llon, rlon, llat, rlat):\n bm.BaseMapSet(Type, llon, rlon, llat, rlat)\n \n plt.scatter(self.demandx, self.demandy, 100, self.color, marker = 'o', label = self.demandname) \n \n plt.scatter(self.tranx, self.trany, 200, self.color, marker = '*', label = self.tranname) \n \n plt.scatter(self.supplyx, self.supplyy, 400, self.color, marker = '+', label = self.supplyname) \n \n plt.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=1, fontsize = 25)",
"def location(self, location):\n self._location = location",
"def setPoint(self, point):\n self._point = point\n self._point = self.projectPoint(Point.origin(point.dimension))",
"def setMap(self, map):\n maputils.detachMapStats(self.map)\n\n activeTool = self._activeTool\n self.__base.setMap(self, map)\n if activeTool:\n self.setTool(activeTool)\n\n self._updateLevelSlider()",
"def set_location(self, x, y, z=0):\n self._rect.topleft = (x, y)\n self._z = z\n self._update()",
"def setLocation(self, p):\n super(PolygonTool, self).setLocation(p.point)\n _x, _y = self.getLocation().getCoords()\n _count = self.__nsides\n _inc = self.__increment\n if self.__external:\n _offset = _inc/2.0\n else:\n _offset = 0.0\n _cx, _cy = self.__center.point.getCoords()\n _xsep = _x - _cx\n _ysep = _y - _cy\n _angle = math.atan2(_ysep, _xsep) + _offset\n _rad = math.hypot(_xsep, _ysep)/math.cos(_offset)\n _xp = self.__xpts\n _yp = self.__ypts\n for _i in range(_count):\n _xp[_i] = _cx + (_rad * math.cos(_angle))\n _yp[_i] = _cy + (_rad * math.sin(_angle))\n _angle = _angle + _inc"
] | [
"0.6776562",
"0.67337173",
"0.6717304",
"0.66437644",
"0.66012245",
"0.65962243",
"0.65886664",
"0.6457518",
"0.643699",
"0.63909936",
"0.62847275",
"0.6281692",
"0.6220761",
"0.6214897",
"0.60538673",
"0.60247016",
"0.6005318",
"0.5934853",
"0.5927787",
"0.5925752",
"0.5910971",
"0.59025985",
"0.58874655",
"0.587144",
"0.58130527",
"0.5809607",
"0.58010453",
"0.5780912",
"0.57576734",
"0.5751611"
] | 0.6822219 | 0 |
Reset the obstacles' positions within the map (should be called when map is refreshed to clean the array) | def reset_obstacles(self):
self.obstacles = np.array([]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n self.obstacles = []\n self._tick = 0",
"def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()",
"def reset(self) -> None:\n self.map = []\n for col in range(self.width):\n self.map.append([])\n for cell in range(self.height):\n if col > 1 and col < self.width - 2:\n if cell == 0:\n # World Barrier - Top Middle\n self.map[col].append(StaticTile('wall_3', self.graphicsLibrary.get('wall_3'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif cell == self.height - 1:\n # World Barrier - Bottom Middle\n self.map[col].append(StaticTile('wall_12', self.graphicsLibrary.get('wall_12'), (self.scaleWidth,self.scaleHeight), barrier=True))\n else:\n # Playable Map Area\n if (col % 2) != 0 and (cell % 2) == 0:\n # Hard-Barrier Generation\n self.map[col].append(StaticTile('solid', self.graphicsLibrary.get('solid'), (self.scaleWidth,self.scaleHeight), barrier=True))\n elif (col,cell) in self.spawn_buffers:\n # Preserve Potential Spawn Points\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n elif random.randint(0, 2) == 0:\n # Soft-Barrier Generation\n self.map[col].append(DynamicTile('destructable_new', self.graphicsLibrary.get('destructable_new'), (self.scaleWidth,self.scaleHeight), destructable=\"True\", barrier=True, death_animation=self.animations_library.get('destructable_death')))\n else:\n # Fill Remaining Terrain\n self.map[col].append(StaticTile('terrain', self.graphicsLibrary.get('terrain'), (self.scaleWidth,self.scaleHeight), barrier=False))\n else:\n # World Barrier - Side Sections\n if col == 0 or col == self.width - 1:\n # Roof\n right_most_columns = False\n if col == self.width - 1:\n right_most_columns = True\n\n if cell == self.height - 1:\n self.map[col].append(StaticTile('wall_10', self.graphicsLibrary.get('wall_10'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_1', self.graphicsLibrary.get('wall_1'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_5', self.graphicsLibrary.get('wall_5'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif col == 1 or col == self.width - 2:\n # Floor \n right_most_columns = False\n if col == self.width - 2:\n right_most_columns = True\n\n if cell == self.height -1:\n self.map[col].append(StaticTile('wall_11', self.graphicsLibrary.get('wall_11'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == self.height - 2:\n self.map[col].append(StaticTile('wall_9', self.graphicsLibrary.get('wall_9'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 0:\n self.map[col].append(StaticTile('wall_2', self.graphicsLibrary.get('wall_2'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n elif cell == 1:\n self.map[col].append(StaticTile('wall_6', self.graphicsLibrary.get('wall_6'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n else:\n self.map[col].append(StaticTile('wall_7', self.graphicsLibrary.get('wall_7'), (self.scaleWidth,self.scaleHeight), flip_x=right_most_columns, barrier=True))\n self.map[col][cell].place_at(topleft=(self.scaleWidth * col, self.scaleHeight * cell))",
"def _reset_map(self):\n if not self.scenario_name.startswith('random'):\n self._map = self._fixed_original_map.copy()\n else:\n from environment.scenarios import generate_random_map\n self._map = generate_random_map(self.scenario_name)\n\n # Precompute wall channel and positions since they are static\n self._walls_channel = (self._map == WALL).astype(int)\n xs, ys = np.where(self._walls_channel)\n self._wall_positions = list(zip(xs, ys))\n\n # Set avatar position bidirectional caches (first thieves then guardians)\n xs_t, ys_t = np.where(self._map == THIEF)\n xs_g, ys_g = np.where(self._map == GUARDIAN)\n xs = np.concatenate([xs_t, xs_g])\n ys = np.concatenate([ys_t, ys_g])\n for avatar_id, (x, y) in enumerate(zip(xs, ys)):\n self._id2pos[avatar_id] = x, y\n self._pos2id[(x, y)] = avatar_id\n\n self._chased_treasure_pos = _coords_where(self._map == TREASURE)\n self._chased_thief_id = 0",
"def reset(self):\n \n #initiate all tiles' value to 0\n self._grid_2048 = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n \n # two new tiles\n self.new_tile()\n self.new_tile()",
"def _clear_map(self, default=100):\r\n self.tiles = [\r\n [default\r\n for _ in range(self.height)]\r\n for _ in range(self.width)]\r\n\r\n for (x, y, score) in self.goals:\r\n self.tiles[x][y] = score\r\n\r\n for (x,y) in self.walls:\r\n self.tiles[x][y] = np.nan",
"def reset_map(self):\n self.x = None\n self.X = None\n self.y = None\n self.Y = None\n self.data = None\n self.sampling = None\n self.size = None",
"def reset(self):\n\n #Create a grid of zeros\n self._grid = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n # _available_new_tiles will be refilled every 10 moves\n self._available_new_tiles = TOTAL_AVAILABLE_MOVES[:]\n for dummy_i in range(2):\n self.new_tile()\n self._game_over = False",
"def specific_reset(self) -> None:\n\n # first, set agent xy and adjust its height\n self.agent.specific_reset()\n agent_pos = np.zeros(3)\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # second, reset obstacle positions\n if len(self.obstacles) > 0:\n obs_init_pos = env_utils.generate_obstacles_init_pos(\n num_obstacles=len(self.obstacles),\n agent_pos=self.agent.get_position(),\n goal_pos=np.array([]), # no goal in gather task\n world=self.world,\n min_allowed_distance=self.obstacle_obstacle_distance,\n agent_obstacle_distance=self.agent_obstacle_distance\n )\n for i, ob in enumerate(self.obstacles):\n ob.set_position(obs_init_pos[i])\n\n # finally, make all collected objects visible again\n [ob.update_visuals(make_visible=True) for ob in self.obstacles]",
"def reset_objects(new_game_map):\r\n new_coins = []\r\n new_enemies = []\r\n y_val = 0\r\n for row in new_game_map:\r\n x_val = 0\r\n for tile in row:\r\n if tile == '3':\r\n new_coins.append(pygame.Rect((x_val * TILE_SIZE), (y_val * TILE_SIZE), TILE_SIZE, TILE_SIZE))\r\n if tile == '4':\r\n new_enemies.append([[0, 0], pygame.Rect((x_val * TILE_SIZE), (y_val * TILE_SIZE), TILE_SIZE, TILE_SIZE), 1, True, ['enemy_move', 0]])\r\n x_val += 1\r\n y_val += 1\r\n return new_coins, new_enemies, []",
"def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self, grid, disallowed, num_of_obstacles):\n # self.array.clear()\n random_array = []\n\n # If I want the obstacles in the same location every episode\n # random.seed(10)\n\n # Make a copy of the grid\n allowed = grid[:]\n\n [allowed.remove(pos) for pos in disallowed]\n\n for i in range(num_of_obstacles):\n new_pos = random.choice((allowed))\n self.array.append(new_pos)\n random_array.append(new_pos)\n allowed.remove(new_pos)\n\n self.array_length = self.array_length + num_of_obstacles\n\n return random_array",
"def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()",
"def reset_map(self):\n self.reset_world(self._filename)",
"def reset(self):\r\n self.grid = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self):\n # replace with your code\n self._grid = [[0] * self._width for _ in xrange(self._height)]\n self.new_tile()\n self.new_tile()",
"def reset(self):\n # self.grid = [[0] * self.grid_width] * self.grid_height\n self.grid = []\n for dummy_row in range(self.grid_height):\n new_row = []\n for dummy_col in range(self.grid_width):\n new_row.append(0)\n self.grid.append(new_row)\n self.new_tile()\n self.new_tile()",
"def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()",
"def reset(self):\n # replace with your code\n self._grid = [[0 for dummy_column in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n for dummy_num in range(2):\n self.new_tile()",
"def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8",
"def reset(self):\n self.grid = [[0 for col in range(self.grid_width)] for row in range(self.grid_height)]\n # next, create a list of all tuples (row,col)\n self.tiles = [(row,col) for row in range(self.grid_height) for col in range(self.grid_width)] \n for dummy_idx in range(2):\n self.new_tile()\n self.dir_dic = {\n UP:[(0, col) for col in range(self.grid_width)],\n DOWN:[(self.grid_height-1, col) for col in range(self.grid_width)],\n LEFT:[(row,0) for row in range(self.grid_height)], \n RIGHT:[(row,self.grid_width-1) for row in range(self.grid_height)]}",
"def reset(self):\r\n\r\n self._board = [[0 for x in range(self._grid_width)]\r\n for y in range(self._grid_height)]\r\n self.new_tile()",
"def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells",
"def reset(self):\n self._maps = {}",
"def reset(self):\r\n self.board = [[0 for i in range(self.width)]\r\n for i in range(self.height)]\r\n self.new_tile()\r\n self.new_tile()",
"def reset(self, roi_warped_points):\n self.__init__(roi_warped_points)",
"def reset(self):\n # replace with your code\n self.board = [[0 for dummy_index in range(self.grid_width)] for dummy_inner_index in range(self.grid_height)]",
"def _reset_game(self):\n\t\tself.is_play = True\n\t\tself.is_gameover = False\n\t\tself.bolan.rect.y = self.bolan.default_y\n\t\tself.scoreboard.score = 0\n\t\tself.obstacles._reset_positions()",
"def reset(self):\n self._grid = [[0 for dummy_col in range(self._width)]\n for dummy_row in range(self._height)]\n self.new_tile()\n self.new_tile()"
] | [
"0.80095875",
"0.7382117",
"0.72062683",
"0.7101044",
"0.7061013",
"0.70536333",
"0.7010583",
"0.6989404",
"0.69813454",
"0.69743735",
"0.6880206",
"0.68602246",
"0.6838129",
"0.6808036",
"0.67923963",
"0.6767282",
"0.67519385",
"0.67181975",
"0.67109746",
"0.6702349",
"0.6693133",
"0.66791046",
"0.6668063",
"0.66637594",
"0.6661358",
"0.665283",
"0.66279536",
"0.66215134",
"0.65996504",
"0.65979785"
] | 0.84130687 | 0 |
Generate possible paths around the passed obstacle | def generate_possible_paths(self, obstacle):
if self.does_uav_intersect_obstacle_vertically(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):
if self.does_path_intersect_obstacle_2d(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):
new_attempt_pos_points = [
[obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],
[obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],
[obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],
[obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],
[obstacle.get_point()[0], obstacle.get_point()[1] + obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],
[obstacle.get_point()[0], obstacle.get_point()[1] - obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],
[obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],
[obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)]
]
new_paths = []
for new_pos_point in new_attempt_pos_points:
if not self.does_path_intersect_obstacle_3d(obstacle, self.drone.get_point(), new_pos_point) and self.flight_boundary.is_point_in_bounds(new_pos_point):
for recursive_new_pos_point in new_attempt_pos_points:
if self.flight_boundary.is_point_in_bounds(recursive_new_pos_point) and abs(recursive_new_pos_point[2] - new_pos_point[2]) < 5:
if recursive_new_pos_point[0] != new_pos_point[0] or recursive_new_pos_point[1] != new_pos_point[1]:
if not self.does_path_intersect_obstacle_3d(obstacle, new_pos_point, recursive_new_pos_point) and not self.does_path_intersect_obstacle_3d(obstacle, recursive_new_pos_point, self.drone.get_waypoint_holder().get_current_waypoint()):
new_paths.append([new_pos_point, recursive_new_pos_point])
# Uncomment for DEBUGGING ONLY
for path in new_paths:
print("Point:", str(path))
return new_paths
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_path(self, start_point: Pos, end_point: Pos, obstacles: list) -> list:\n pass",
"def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)",
"def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n if obstacleGrid[0][0] == 1:\n return 0\n\n m, n = len(obstacleGrid), len(obstacleGrid[0])\n dp = [[0 for _ in range(n)] for _ in range(m)]\n dp[0][0] = 1\n\n for i in range(1, m):\n if obstacleGrid[i][0] == 1: break\n else: dp[i][0] = dp[i-1][0]\n\n for j in range(1, n):\n if obstacleGrid[0][j] == 1: break\n else: dp[0][j] = dp[0][j-1]\n\n for i in range(1, m):\n for j in range(1, n):\n if obstacleGrid[i][j] == 0:\n dp[i][j] = dp[i-1][j] + dp[i][j-1]\n\n return dp[-1][-1]",
"def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result",
"def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found",
"def search_path(self):\n\n nodes = [self.start]\n final_node = None\n \n count = 0\n while True:\n count += 1\n\n if count % self.pick_target == 0:\n pick = self.goal.pos[:2]\n else:\n pick = self.car.random_pos()[:2]\n \n nearest = self.get_nearest_node(nodes, pick)\n\n if count % self.check_dubins == 0:\n solutions = self.dubins.find_tangents(nearest.pos, self.goal.pos)\n dubins_route, cost, valid = self.dubins.best_tangent(solutions)\n \n if valid:\n final_node = nearest\n break\n\n phi = self.get_steering_angle(nearest.pos, pick)\n pos = nearest.pos\n branch = [pos[:2]]\n \n for i in range(self.max_steps):\n pos = self.car.step(pos, phi)\n branch.append(pos[:2])\n \n # check safety of route-----------------------\n if phi == 0:\n safe = self.dubins.is_straight_route_safe(nearest.pos, pos)\n else:\n d, c, r = self.car.get_params(nearest.pos, phi)\n safe = self.dubins.is_turning_route_safe(nearest.pos, pos, d, c, r)\n # --------------------------------------------\n \n if not safe:\n continue\n \n new_node = Node(pos, phi, i+1)\n \n if new_node in nodes:\n continue\n \n new_node.branch = branch\n new_node.parent = nearest\n nodes.append(new_node)\n \n route = self.backtracking(final_node) + dubins_route\n path = self.car.get_path(self.car.start_pos, route)\n print('Total iteration:', count)\n \n return path, nodes",
"def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path",
"def a_star_obs(obs_map):\n world_ndarray = np.copy(obs_map[0])\n\n start = tuple(np.argwhere(world_ndarray == -2)[0])\n goal = tuple(np.argwhere(world_ndarray == -3)[0])\n\n world_ndarray[world_ndarray == -2] = 0\n world_ndarray[world_ndarray == -3] = 0\n\n world_tuple = tuple(map(tuple, world_ndarray))\n\n def h_custom_i(cur, end, obstacle):\n ytop, ybot, minx = obstacle\n cur_y, cur_x = cur\n end_y, end_x = end\n obs_bot = np.where(world_ndarray[ybot] == -1)[0][0]\n mid_y = ybot + (ytop - ybot) // 2\n if cur_y in range(ybot, ytop) and cur_x in range(max(obs_bot, start[1]), end_x):\n return 5000 - abs(minx - cur_x) ** 2 - abs(cur_y - mid_y) ** 2\n return abs(cur_x - end_x) + abs(cur_y - end_y)\n\n pr_queue = [] # Use heapqueue as priority queue\n heappush(pr_queue, (0 + h_custom_i(start, goal, obs_map[1]), 0, \"\", start))\n visited = set() # Each element has to be unique in a set\n graph = get_neighbors(world_tuple)\n route_str = \"\"\n\n while pr_queue:\n _, cost, path, current = heappop(pr_queue)\n if current == goal:\n route_str = path\n break\n if current in visited:\n continue\n visited.add(current)\n for direction, neighbour in graph[current].iteritems():\n heappush(pr_queue, (cost + h_custom_i(neighbour, goal, obs_map[1]), cost + 1, path + direction, neighbour))\n world_ndarray[neighbour] = cost + 1\n\n # print \"Expanded nodes(A*+Custom H): \", len(visited), \" Path length: \", len(route_str)\n # Convert string directions to 2D(x,y) coordinates\n route_coord = [start]\n for p in route_str:\n route_coord.append(graph[route_coord[-1]][p])\n\n world_ndarray[start] = -2 # Mark the start and end coordinates again\n world_ndarray[goal] = -3\n\n return route_coord, world_ndarray, len(visited), len(route_str)",
"def get_path(self):\r\n path = [self.city_map.get_tile_at_position(self.position)]\r\n destination_tile = self.city_map.get_tile_at_position(self.destination)\r\n\r\n neighbors = self.city_map.get_adjacent_intersections(self.position)\r\n closest_neighbor = None\r\n closest_neighbor_distance = math.inf\r\n for neighbor in neighbors:\r\n if distance(neighbor.position, self.position) < closest_neighbor_distance:\r\n closest_neighbor = neighbor\r\n closest_neighbor_distance = distance(neighbor.position, self.position)\r\n\r\n path.append(closest_neighbor)\r\n\r\n while destination_tile not in path:\r\n current_tile = path[-1]\r\n neighbors = self.city_map.get_adjacent_intersections(current_tile.position)\r\n closest_neighbor = None\r\n closest_neighbor_distance = math.inf\r\n for neighbor in neighbors:\r\n if current_tile.position['y'] == self.destination['y'] or current_tile.position['x'] == \\\r\n self.destination['x']:\r\n # If we're on the same row\r\n if current_tile.position['y'] == self.destination['y']:\r\n # And if the destination is between our current position and the next intersection over, go to it\r\n if (current_tile.position['x'] < self.destination['x'] <= neighbor.position['x'] or\r\n neighbor.position['x'] <= self.destination['x'] < current_tile.position['x']):\r\n path.append(destination_tile)\r\n break\r\n # If the neighbor gets us closer, go to it\r\n elif distance(neighbor.position, self.destination) < distance(current_tile.position,\r\n self.destination):\r\n path.append(neighbor)\r\n break\r\n if current_tile.position['x'] == self.destination['x']:\r\n if (current_tile.position['y'] < self.destination['y'] <= neighbor.position['y'] or\r\n neighbor.position['y'] <= self.destination['y'] < current_tile.position['y']):\r\n path.append(destination_tile)\r\n break\r\n # If the neighbor gets us closer, go to it\r\n elif distance(neighbor.position, self.destination) < distance(current_tile.position,\r\n self.destination):\r\n path.append(neighbor)\r\n break\r\n\r\n elif distance(neighbor.position, self.destination) < closest_neighbor_distance:\r\n closest_neighbor = neighbor\r\n closest_neighbor_distance = distance(neighbor.position, self.destination)\r\n\r\n # If the last iteration through the loop did reach the destination, don't append this\r\n if destination_tile not in path and closest_neighbor is not None:\r\n path.append(closest_neighbor)\r\n\r\n return path",
"def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None",
"def plan_path(self, start_point, end_point, map_obj):\n # STUFF FOR TESTING \n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n self.vis_pub.publish(marker)\n \n exploration_bias = 1.0 - self.goal_bias\n final_node = None\n num_existing_path_points_added = 0\n \n self.rrt_star = RRTStar(Node(start_point))\n self.max_iterations = self.rrt_star.max_size\n while self.rrt_star.size <= self.max_iterations:\n p = np.random.uniform()\n if p < exploration_bias:\n \n x_rand = self.map.sample_free_space()\n else:\n if final_node is None:\n x_rand = end_point\n else:\n x_rand = self.branched_from_existing_path(\n final_node,\n depth_underestimate=num_existing_path_points_added\n )\n num_existing_path_points_added += 1\n\n x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()",
"def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, 
snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight",
"def find_good_paths(self):\n return self.robot_step((0,0),[])",
"def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])",
"def _getPathPair(\n self, speed=2, smoothPoints=5, collisionShrink=0.05, pathDelay=1\n ):\n # break out these parameters later\n # smoothPoints = 5\n # collisionShrink = 0.05 # mm, this value shoudld probably be a function of stepsize\n # speed = 2 # rpm at output\n # pathDelay = 1 # seconds\n ###########\n\n self.smoothPaths(smoothPoints)\n self.simplifyPaths()\n self.shrinkCollisionBuffer(collisionShrink)\n self.verifySmoothed(self.nSteps)\n # print(\"got %i smoothed collisions in getPathPair\"%self.smoothCollisions)\n self.growCollisionBuffer(collisionShrink)\n\n toDestination = {}\n fromDestination = {}\n\n for r in self.robotDict.values():\n\n # if robot is offline, don't get a path for it\n if r.isOffline:\n continue\n\n ibp = np.array(r.simplifiedBetaPath)\n\n\n iap = np.array(r.simplifiedAlphaPath)\n\n\n alphaTimesR = iap[:, 0] * self.stepSize / (speed * 360 / 60.)\n alphaDegR = iap[:, 1]\n betaTimesR = ibp[:, 0] * self.stepSize / (speed * 360 / 60.)\n betaDegR = ibp[:, 1]\n\n\n # add time buffer for the reverse path, in case robot is\n # not exactly starting from the expected spot.\n # build path from initial state to destination\n armPathR = {}\n armPathR[\"alpha\"] = [(pos, time + pathDelay) for pos, time in zip(alphaDegR, alphaTimesR)]\n armPathR[\"beta\"] = [(pos, time + pathDelay) for pos, time in zip(betaDegR, betaTimesR)]\n\n toDestination[int(r.id)] = armPathR\n\n # build path from destination to initial state\n alphaTimesF = np.abs(alphaTimesR - alphaTimesR[-1])[::-1]\n alphaDegF = alphaDegR[::-1]\n betaTimesF = np.abs(betaTimesR - betaTimesR[-1])[::-1]\n betaDegF = betaDegR[::-1]\n\n armPathF = {}\n armPathF[\"alpha\"] = [(pos, time + pathDelay) for pos, time in zip(alphaDegF, alphaTimesF)]\n armPathF[\"beta\"] = [(pos, time + pathDelay) for pos, time in zip(betaDegF, betaTimesF)]\n\n\n fromDestination[int(r.id)] = armPathF\n\n return toDestination, fromDestination",
"def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)",
"def paths(self, start):\n # This is probably a little slow\n tupadd = lambda p, v: (p[0] + v[0], p[1] + v[1])\n # First, we'll check adjacency moves.\n adj = [tupadd(start, v) for v in DIRECTIONS]\n yield from (p for p in adj if self.board(p) == 0)\n # Now we check repeated hops.\n # We do this by a breadth first search.\n\n #TODO: Consensus on legality of hopping back to start and \"skipping\"\n visited = set(adj)\n to_visit = [start]\n while len(to_visit):\n pt = to_visit.pop(0)\n if pt in visited:\n continue\n\n # We have to actually move a piece\n # But this stops us from considering \"start\" even if we can\n # make some hops and get back to start\n if pt is not start:\n yield pt\n \n visited.add(pt)\n # Compute the hop directions\n dirs = ((tupadd(pt, v), tupadd(pt, tupadd(v, v))) for v in DIRECTIONS)\n to_visit.extend(\n dest for over, dest in dirs\n if self.board(over) > 0\n and self.board(dest) == 0\n and dest not in visited\n and over != start\n )",
"def prm_planning(start_x, start_y, goal_x, goal_y,\n obstacle_x_list, obstacle_y_list, robot_radius, *, rng=None):\n obstacle_kd_tree = KDTree(np.vstack((obstacle_x_list, obstacle_y_list)).T)\n\n sample_x, sample_y = sample_points(start_x, start_y, goal_x, goal_y,\n robot_radius,\n obstacle_x_list, obstacle_y_list,\n obstacle_kd_tree, rng)\n if show_animation:\n plt.plot(sample_x, sample_y, \".b\")\n\n road_map = generate_road_map(sample_x, sample_y,\n robot_radius, obstacle_kd_tree)\n\n rx, ry = dijkstra_planning(\n start_x, start_y, goal_x, goal_y, road_map, sample_x, sample_y)\n\n return rx, ry",
"def carve_path(self):\n final = self.length # once we reach the last length, we set the goal and terminate\n w, l, h = 0, 0, 0 # start at 0,0,0\n last_move_name, last_move_tuple = \"forward\", (0, 1, 0) # we don't want to repeat the last movement\n moves = {\"back\": (0, -1, 0), \"left\": (-1, 0, 0), \"right\": (1, 0, 0), \"up\": (0, 0, 1),\n \"down\": (0, 0, -1)} # possible moves\n self.world_grid[w][l][h] = blocks[\"empty\"] # set the current block empty\n while l != final:\n move, (m_w, m_l, m_h) = random.choice(list(moves.iteritems())) # get a move\n w += m_w # apply move\n l += m_l\n h += m_h\n self.world_grid[w][l][h] = blocks[\"empty\"] # set that cell empty\n moves[last_move_name] = last_move_tuple # add back in the last move to movelist\n last_move_name, last_move_tuple = move, (m_w, m_l, m_h) # copy the current move to last move\n moves.pop(last_move_name) # remove the current\n self.goal = (w, l, h) # after terminating, set this as the goal",
"def generate_path(goal_node, visited):\n goal_state = goal_node['state']\n path = [goal_state]\n while goal_node['parent']:\n path.append(goal_node['state'])\n goal_node = visited[goal_node['parent']]\n return path",
"def solution(self):\n return [node.move for node in self.path()[1:]]",
"def solution_path(self) -> list[State]:",
"def path_and_costmap_from_config(params):\n # we assume right turn, we can always flip it\n turn_params = params.turn_params\n\n hh = turn_params.main_corridor_length / 2\n w = turn_params.turn_corridor_length / 2\n alpha = turn_params.turn_corridor_angle\n dd = turn_params.main_corridor_width\n z = turn_params.turn_corridor_width\n margin = turn_params.margin\n flip_arnd_oy = turn_params.flip_arnd_oy\n flip_arnd_ox = turn_params.flip_arnd_ox\n rot_theta = turn_params.rot_theta\n\n pts = _draw_pts_in_standard_coords(dd, hh, alpha, z, w)\n oriented_way_pts = _generate_path_in_standard_coords(dd, hh, alpha, z, w)\n\n # Maybe transform the points\n rot_mtx = _rotation_matrix(rot_theta)\n\n flipping_mtx = np.array(\n [[-1. if flip_arnd_oy else 1., 0.],\n [0., -1. if flip_arnd_ox else 1.]],\n )\n transform_mtx = np.dot(rot_mtx, flipping_mtx)\n\n new_pts = []\n\n for pt in pts:\n new_pt = np.dot(transform_mtx, pt)\n new_pts.append(new_pt)\n\n new_oriented_way_pts = []\n for pt in oriented_way_pts:\n x, y, t = pt\n nx, ny = np.dot(transform_mtx, np.array([x, y]))\n new_angle = t\n if flip_arnd_ox:\n new_angle = -new_angle\n if flip_arnd_oy:\n new_angle = np.pi - new_angle\n new_angle = np.mod(new_angle + rot_theta, 2 * np.pi)\n new_pt = np.array([nx, ny, new_angle])\n new_oriented_way_pts.append(new_pt)\n\n a, _, c, d, e, _, g, h, i, j = new_pts # pylint: disable=unbalanced-tuple-unpacking\n rb, rk, rl, rf = new_oriented_way_pts # pylint: disable=unbalanced-tuple-unpacking\n all_pts = np.array(list(new_pts))\n\n min_x = all_pts[:, 0].min()\n max_x = all_pts[:, 0].max()\n min_y = all_pts[:, 1].min()\n max_y = all_pts[:, 1].max()\n\n world_size = abs(max_x - min_x) + 2 * margin, abs(max_y - min_y) + 2 * margin\n world_origin = min_x - margin, min_y - margin\n\n obstacles = [\n Wall(from_pt=a, to_pt=i),\n Wall(from_pt=c, to_pt=d),\n Wall(from_pt=d, to_pt=e),\n Wall(from_pt=j, to_pt=g),\n Wall(from_pt=g, to_pt=h)\n ]\n\n static_path = np.array([rb, rk, rl, rf])\n\n static_map = CostMap2D.create_empty(\n world_size=world_size, # x width, y height\n resolution=params.env_params.resolution,\n world_origin=world_origin\n )\n\n for obs in obstacles:\n static_map = obs.render(static_map)\n\n return static_path, static_map",
"def plan_path(self, msg):\n # Request the map\n # In case of error, return an empty path\n mapdata = PathPlanner.request_map()\n\n if mapdata is None:\n return Path()\n # Calculate the C-space and publish it\n cspacedata = self.calc_cspace(mapdata, 3)\n # Execute A*\n start = PathPlanner.world_to_grid(mapdata, msg.start.pose.position)\n goal = PathPlanner.world_to_grid(mapdata, msg.goal.pose.position)\n \n path = self.a_star(cspacedata, start, goal) #, self.c_space_array, self.frontier, self.expanded)\n \n # Optimize waypoints\n waypoints = PathPlanner.optimize_path(path)\n # print waypoints\n waypoints.remove(waypoints[0])\n # print waypoints\n\n self.path_pub.publish(self.path_to_message(cspacedata, waypoints))\n # Return a Path message\n return self.path_to_message(cspacedata, waypoints)",
"def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res",
"def make_path(self, loop):\n path=Path(closed=True)\n for c in range(0,len(loop)):\n connection=loop[c]\n nextConnection = loop[(c+1)%len(loop)]\n lastConnection = loop[(c-1)%len(loop)]\n if self.dontFill(connection, nextConnection, lastConnection):\n return False\n cp = self.corner_pos(connection, nextConnection, lastConnection)\n corner_offset = cp[0]\n corner_dir = cp[1]\n # catch case when it is the end of a single rod\n if type(corner_offset) is list:\n endpoints = corner_offset\n corner_offset = endpoints[0]\n else:\n endpoints = False\n if connection.other.radius is not None and ( corner_offset.length() < connection.other.radius or corner_dir >0):\n # catch case when it is the end of a single rod\n if endpoints:\n para=(connection.this.pos-connection.other.pos).normalize()\n d = math.sqrt(connection.other.radius**2 - corner_offset.length()**2)\n path.add_point(PSharp(connection.other.pos + corner_offset + d*para))\n path.add_point(PArc(connection.other.pos, radius=connection.other.radius, direction='cw'))\n path.add_point(PSharp(connection.other.pos - corner_offset+d*para))\n else:\n path.add_point(PAroundcurve(connection.other.pos + corner_offset, centre=connection.other.pos, radius=connection.other.radius, direction='cw'))\n\n elif self.get_intRadius(connection, connection.other) is not None:\n # path.add_point(PIncurve(connection.other.pos + corner_offset, radius=self.get_intRadius(connection, connection.other)))\n # path.add_point(PIncurve(connection.other.pos - corner_offset, radius=self.get_intRadius(connection, connection.other)))\n # path.add_point(PIncurve(connection.other.pos - corner_offset, radius=self.get_intRadius(connection, connection.other)))\n path.add_point(PIncurve(connection.other.pos + corner_offset, radius=self.get_intRadius(connection, connection.other)))\n else:\n cornerpos = self.corner_pos(connection, nextConnection, lastConnection)\n if type(cornerpos) is list:\n path.add_point(PSharp(connection.other.pos + cornerpos[0]))\n path.add_point(PSharp(connection.other.pos + cornerpos[1]))\n else:\n path.add_point(PSharp(connection.other.pos + cornerpos))#self.corner_pos(connection, nextConnection, lastConnection)))\n # path.add_point(PSharp(connection.other.pos - cornerpos))#self.corner_pos(connection, nextConnection, lastConnection)))\n if connection.other.holeRad is not None:\n if type(connection.other.holeRad) is int or type(connection.other.holeRad) is float:\n self.otherpaths.append(Circle(connection.other.pos, rad=connection.other.holeRad, side='in'))\n else:\n t=copy.deepcopy(connection.other.holeRad)\n t.translate(connection.other.pos)\n self.otherpaths.append(t)\n return path",
"def get_path(self, grid, start_wp, end_wp):\n # The open and closed sets\n openset = set()\n closedset = set()\n\n # Add the starting point to the open set\n openset.add(start_wp)\n\n # While the open set is not empty\n while openset:\n # Find the waypoint in the open set with the lowest G + H score\n current_wp = min(openset, key=lambda o: o.G + o.H)\n # Found the goal\n if current_wp == end_wp:\n path = []\n while current_wp.parent:\n path.append(current_wp)\n current_wp = current_wp.parent\n path.append(current_wp)\n print(\"Path found in {} moves: {}\".format(len(path), path))\n return path[::-1]\n\n # Remove the waypoint from the open set\n openset.remove(current_wp)\n # Add it to the closed set\n closedset.add(current_wp)\n\n # Generate children\n children = current_wp.generate_children(grid)\n\n for waypoint in children:\n # If it is already in the closed set, skip it\n if waypoint in closedset:\n continue\n # Otherwise if it is already in the open set\n if waypoint in openset:\n # Check if we beat the G score\n new_g = current_wp.G + 1\n\n if waypoint.G > new_g:\n # If so, update the waypoint to have a new parent\n waypoint.G = new_g\n waypoint.parent = current_wp\n else:\n # If it isn't in the open set, calculate the G and H score for the waypoint\n if waypoint.orientation != current_wp.orientation:\n waypoint.G = current_wp.G + 1.5 # Avoiding zigzag move by increase the cost of a rotation\n else:\n waypoint.G = current_wp.G + 1\n\n waypoint.H = abs(waypoint.x - end_wp.x) + abs(waypoint.y - end_wp.y)\n # Set the parent to our current_wp\n waypoint.parent = current_wp\n # Add it to the set\n openset.add(waypoint)\n\n # If there is no solution\n return [start_wp, end_wp]",
"def __generate_octagon_obstacles(self, world):\n obs_radius = self.cfg[\"obstacle\"][\"octagon\"][\"radius\"]\n obs_min_count = self.cfg[\"obstacle\"][\"octagon\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"octagon\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"octagon\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"octagon\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = OctagonObstacle(obs_radius, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles",
"def search(world_state, robot_pose, goal_pose):\n if world_state.shape[0] == 0 or world_state.shape[1] == 0:\n print(\"Error, empty world_state!!!\")\n return None\n if not is_pos_valid(robot_pose, world_state.shape):\n print(\"Error, invalid robot_pose!!!\", robot_pose)\n return None\n if not is_pos_valid(goal_pose, world_state.shape):\n print(\"Error, invalid goal_pose!!!\", goal_pose)\n return None\n\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)] # orthogonal directions\n found = False\n\n x, y = robot_pose\n g = 0\n h = heuristic(robot_pose, goal_pose)\n f = g + h\n open = [[f, x, y]]\n came_from = {}\n came_from[robot_pose] = None\n cost_so_far = {}\n cost_so_far[robot_pose] = 0\n\n while open:\n open.sort() # sort based on f value\n current = open.pop(0)\n\n x, y = current[1:]\n g = cost_so_far[(x, y)]\n\n if (x, y) == goal_pose:\n found = True\n break\n else:\n # find available next positions\n for direction in directions:\n x2 = x + direction[0]\n y2 = y + direction[1]\n\n # check whether x2 and y2 are valid\n if not is_pos_valid((x2, y2), world_state.shape):\n continue\n\n g2 = g + 1\n if world_state[x2, y2] == 0 and ((x2, y2) not in cost_so_far or g2 < cost_so_far[(x2, y2)]):\n\n h2 = heuristic((x2, y2), goal_pose)\n f2 = g2 + h2\n open.append([f2, x2, y2])\n came_from[(x2, y2)] = (x, y)\n cost_so_far[(x2, y2)] = g2\n if found:\n path = [goal_pose]\n current = goal_pose\n while came_from[current]:\n current = came_from[current]\n path.append(current)\n\n path.reverse()\n return path\n\n else:\n return None",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n stack = util.Stack() # stack to keep track of frontier nodes where pacman has move\n stack.push(start)\n explored = set() # to keep track of explored areas\n route = []\n\n while not stack.isEmpty():\n current_position = stack.pop()\n explored.add(current_position)\n\n if problem.isGoalState(current_position):\n break\n for each in problem.getSuccessors(current_position):\n if each[0] not in explored: # x,y coordinates of positions we haven't visited are pushed onto stack\n # print(each)\n stack.push(each[0])\n route.append((current_position, each[0], each[1])) # record of movements to rebuild path (from,to,how)\n\n x = len(route)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if route[x - 1][0] != route[x - 2][1]: # starts from goal and works backwards\n route.remove(route[x - 2])\n x = len(route)\n else:\n x -= 1\n # print(route)\n return [action[2] for action in route]"
] | [
"0.7479054",
"0.6964157",
"0.6800724",
"0.6652429",
"0.65608233",
"0.6515328",
"0.6502793",
"0.64869034",
"0.64225703",
"0.63867986",
"0.62653744",
"0.6237662",
"0.61990416",
"0.61127084",
"0.61037284",
"0.60940355",
"0.6085445",
"0.6071979",
"0.6071137",
"0.6048148",
"0.6047975",
"0.6042786",
"0.6037737",
"0.6025176",
"0.60099244",
"0.60093457",
"0.5997415",
"0.59826666",
"0.5979279",
"0.59777176"
] | 0.82060087 | 0 |
Determine if the UAV intersects an obstacle on the vertical axis | def does_uav_intersect_obstacle_vertically(self, obstacle, drone_point, waypoint):
if isinstance(obstacle, StationaryObstacle):
if drone_point[2] < obstacle.height + Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint):\n drone_point = uav_point[:-1]\n waypoint = waypoint[:-1]\n obstacle_point = obstacle.get_point()[:-1]\n\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle_point, drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < obstacle.get_radius()\n\n return False",
"def isCollidingWithWall(self, vert, ent1Index, ent2, u, v):\n status = NOCOLLISION\n ent1 = self.listOfEntities[ent1Index]\n \n pt = vert - ent1.body.x.v\n \n vel = ent1.body.velocity.v + np.cross(ent1.body.omega.v, pt)\n \n# vel = QVRotation(ent.body.q,vel)\n \n n = np.cross(u,v)\n n = n/np.linalg.norm(n)\n \n Vr = vel\n Vrn = np.dot(Vr, n)\n \n if Vrn < 0:\n self.listOfCollisions.append(Collision(ent1Index,ent2,n,vert,Vr,-(Vr - (np.dot(np.dot(Vr,n),n)))))\n status = COLLISION\n \n return status",
"def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex():\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def inside(self, uv):\n result = self._trimmed.Perform(gp_Pnt2d(uv[0], uv[1]))\n return result == TopAbs_IN",
"def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False",
"def check_obstructed(r1,r2): \n \n if r1==r2:\n return False\n \n #Densely sample line connecting r1 and r2.\n #If any of those sampled points is inside the rectangle, then the \n #line of sight intersects the rectangle and the tower's view is\n #obstructed.\n NP = 1000\n sampled_x = np.linspace(r1[0],r2[0],NP)\n sampled_y = np.linspace(r1[1],r2[1],NP)\n for x,y,w,h in self.coordinates__obstacles:\n for pt in xrange(NP):\n if (sampled_x[pt] > x) and (sampled_x[pt] < x+w) and \\\n (sampled_y[pt] > y) and (sampled_y[pt] < y+h):\n return True\n return False",
"def isOnInteriorSide(self, v):\n n = self.normalVect()\n return n.dotProduct(vector(self.vertices[0]) - vector(v)) > 0",
"def is_linearly_independent_2x2(u, v):\n uv = get_uv(u, v)\n if uv[0][0] * uv[1][1] - uv[1][0] * uv[0][1] != 0:\n return True\n else:\n return False",
"def is_in_obstacle(self, x: float, y: float) -> bool:\n for obstacle in self.obstacles:\n if obstacle.contains_point((x, y)):\n return True\n return False",
"def has_collide(self, obj):\n rect1 = self.anim.getRect()\n rect2 = obj.anim.getRect()\n \n rect1.move_ip(self.pos)\n rect2.move_ip(obj.pos)\n \n return rect1.colliderect(rect2)",
"def isColliding(self, vert, ent1Index, ent2Index, norm): #u, v\n \n ent1 = self.listOfEntities[ent1Index]\n ent2 = self.listOfEntities[ent2Index]\n status = NOCOLLISION\n \n pt1 = vert - ent1.body.x.v\n pt2 = vert - ent2.body.x.v\n \n vel1 = ent1.body.velocity.v + np.cross(ent1.body.omega.v, pt1)\n vel2 = ent2.body.velocity.v + np.cross(ent2.body.omega.v, pt2)\n \n# norm = np.cross(u,v)\n# norm = -norm/np.linalg.norm(norm)\n norm = norm/np.linalg.norm(norm)\n \n Vr = vel1 - vel2\n Vrn = np.dot(Vr, norm)\n \n if Vrn < 0:\n self.listOfCollisions.append(Collision(ent1Index,ent2Index,norm,vert,Vr,-(Vr - (np.dot(np.dot(Vr,norm),norm)))))\n status = COLLISION\n\n return status",
"def hit(self, otherball):\r\n dx = (self.unif[0] + self.vx) - (otherball.unif[0] + otherball.vx)\r\n dy = (self.unif[1] + self.vy) - (otherball.unif[1] + otherball.vy)\r\n rd = self.radius + otherball.radius\r\n return dot(dx, dy) < (rd * rd)",
"def _in_huc(shply, huc_shply):\n if huc_shply.contains(shply):\n return 2\n elif huc_shply.intersects(shply):\n return 1\n else:\n return 0",
"def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)",
"def has_intersection(self, obj):\r\n obj_x, obj_y = obj.get_location()\r\n x = self.__x\r\n y = self.__y\r\n # Distance formula\r\n distance = sqrt((obj_x - x) ** 2 + (obj_y - y) ** 2)\r\n if distance <= obj.get_radius() + self.__radius:\r\n return True\r\n return False",
"def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def origin_is_inside_hitbox(self, hitbox):\n if self.hitdetection.accurate:\n max_x = max(hitbox, key = lambda index: abs(index[0]))[0]\n max_y = max(hitbox, key = lambda index: abs(index[1]))[1]\n \n m = max(max_x, max_y)\n \n num_intersections = 0\n for i in range(0, len(hitbox), 1):\n if self.hitdetection.module.does_intersect([[m, m], [0, 0]], [hitbox[i], hitbox[(i + 1) % len(hitbox)]]):\n num_intersections += 1\n return [False, True][num_intersections % 2]\n else:\n has_smaller = False\n has_bigger = False\n for hx, hy in hitbox:\n if hx > 0 and hy > 0:\n has_bigger = True\n if hx < 0 and hy < 0:\n has_smaller = True\n return has_smaller and has_bigger",
"def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)",
"def intersect(self, sprite):\n return not ((self.left > sprite.right)\n or (self.right < sprite.left)\n or (self.top < sprite.bottom)\n or (self.bottom > sprite.top))",
"def check_intersection(obj1, obj2):\n (x1, y1, w1, h1) = obj1.get_box()\n (x2, y2, w2, h2) = obj2.get_box()\n if x2 + w2 - 1 < x1 or x2 >= x1 + w1:\n return False\n if y2 + h2 - 1 < y1 or y2 >= y1 + h1:\n return False\n \n return True",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def is_rectangle_colliding(self, rectangle):\n for obstacle in self.obstacle_iterator():\n if rectangle.colliderect(obstacle.rect):\n return True\n return False",
"def intersects(self, cuboid):\n\t\treturn ( cuboid.front >= self.back and cuboid.back < self.front\n\t\t\tand cuboid.right >= self.left and cuboid.left < self.right\n\t\t\tand cuboid.bottom >= self.top and cuboid.top < self.bottom )",
"def _is_collider(self, u, v, w):\n if v in self._children[u] and v in self._children[w]:\n return True\n elif v in self._children[u] and v in self._spouses[w]:\n return True\n elif v in self._spouses[u] and v in self._children[w]:\n return True\n elif v in self._spouses[u] and v in self._spouses[w]:\n return True\n else:\n return False",
"def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None",
"def is_collision_by_map_obstacle(self):\n for content in self.contents:\n if self.content.y == self.y and self.content.x == self.x:\n return True\n else:\n return False",
"def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )"
] | [
"0.66017944",
"0.66017944",
"0.65201724",
"0.6504722",
"0.6439276",
"0.6425067",
"0.638491",
"0.6364111",
"0.63495326",
"0.6324251",
"0.6221943",
"0.6213708",
"0.6208691",
"0.61606497",
"0.6156494",
"0.6152293",
"0.6147638",
"0.6138719",
"0.6131911",
"0.6120432",
"0.6107103",
"0.609772",
"0.608096",
"0.6080534",
"0.6069908",
"0.60662115",
"0.6062206",
"0.6060807",
"0.6045898",
"0.60382247"
] | 0.71816885 | 0 |
Determine if the vector between a UAV's position and the current waypoint intersects an obstacle. | def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint):
drone_point = uav_point[:-1]
waypoint = waypoint[:-1]
obstacle_point = obstacle.get_point()[:-1]
waypoint_vector = np.subtract(waypoint, drone_point)
obstacle_vector = np.subtract(obstacle_point, drone_point)
obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)
rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)
rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)
# Uncomment for DEBUGGING ONLY
print("Waypoint Vector: " + str(waypoint_vector))
print("Obstacle Vector: " + str(obstacle_vector))
print("Rejection Vector: " + str(rejection_vector))
print("Rejection Vector Magnitude: " + str(rejection_vector_magnitude))
print("Obstacle Radius: " + str(obstacle.get_radius()))
print("Distance From Obstacle: " + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point()))))
if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):
return rejection_vector_magnitude < obstacle.get_radius()
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def does_uav_intersect_obstacle_vertically(self, obstacle, drone_point, waypoint):\n if isinstance(obstacle, StationaryObstacle):\n if drone_point[2] < obstacle.height + Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS:\n return True\n\n return False",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def inside_obstacle(point, obstacle):\r\n for obs in obstacle:\r\n if point[0] > obs[0][0] and point[0] < obs[0][2] and point[1] > obs[1][0] and point[1] < obs[1][2]:\r\n return 1\r\n return 0",
"def is_obstacle_in_path_of_drone(self, obstacle_vector, waypoint_vector):\n obstacle_list = obstacle_vector.tolist()\n waypoint_list = waypoint_vector.tolist()\n\n for index in range(len(obstacle_list)):\n if all(item > 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]) or all(item < 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]):\n return False\n\n return True",
"def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None",
"def does_path_intersect_obstacle_3d(self, obstacle, drone_point, waypoint):\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle.get_point(), drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(drone_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS\n\n return False",
"def is_approaching(self, other_particle):\n if self.pos_x < other_particle.pos_x:\n d_v_x = self.velocity_x - other_particle.velocity_x\n else:\n d_v_x = other_particle.velocity_x - self.velocity_x\n\n if self.pos_y < other_particle.pos_y:\n d_v_y = self.velocity_y - other_particle.velocity_y\n else:\n d_v_y = other_particle.velocity_y - self.velocity_y\n\n return d_v_x > 0 or d_v_y > 0",
"def check_position(self, player):\n\n # Mid point of the segment defining the goal\n mid = Point.mid_point(self.s_pos, self.e_pos)\n\n # Transposition of this point by the direction vector of the goal\n # to get the direction vector with its origin in the center of the goal\n mid_prime = self.dir + mid\n\n # Creating both needed vectors\n v1 = Vector.v_from_pp(mid, player.pos)\n v2 = Vector.v_from_pp(mid, mid_prime)\n\n # Getting the angle and checking if it is a valid one\n angle = v1.angle(v2)\n\n return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)",
"def has_uav_reached_current_waypoint(self):\n return self.drone.has_reached_waypoint()",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def is_in_obstacle(self, x: float, y: float) -> bool:\n for obstacle in self.obstacles:\n if obstacle.contains_point((x, y)):\n return True\n return False",
"def through_obstacle(line, obstacles):\r\n noofpoints = 20\r\n for i in range(noofpoints):\r\n if inside_obstacle((line[0]+(i*(line[2]-line[0])/noofpoints), line[1]+(i*(line[3]-line[1])/noofpoints)), obstacles) == 1:\r\n return 1\r\n return 0",
"def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False",
"def IsPointInsideMesh2(obj, p, max_dist = 1.84467e+19):\n bResult, point, normal, face = obj.closest_point_on_mesh(p, max_dist)\n p2 = point-p\n v = p2.dot(normal)\n return not(v < 0.0)",
"def goal_occupied(self, view):\n for line in view.obstacles:\n if linesegdist2(line.p1, line.p2, self.goal) < self.radius ** 2:\n return True\n\n for p in view.pedestrians:\n if p.velocity.length2() == 0.0:\n if p.position.distance_to2(self.goal) < p.radius:\n return True\n\n return False",
"def through_obstacle(line, obstacles):\r\n noofpoints = 100\r\n for i in range(noofpoints):\r\n if inside_obstacle((line[0]+(i*(line[2]-line[0])/noofpoints), line[1]+(i*(line[3]-line[1])/noofpoints)), obstacles) == 1:\r\n return 1\r\n return 0",
"def doesArmTouchObstacles(armPos, obstacles):\n for i in range(len(armPos)):\n cur_arm = armPos[i]\n arm_x = [cur_arm[0][0],cur_arm[1][0]]\n arm_y = [cur_arm[0][1],cur_arm[1][1]]\n if (arm_x[0] != arm_x[1]):\n arm_a = (arm_y[1]-arm_y[0])/(arm_x[1]-arm_x[0])\n arm_b = arm_y[1]-arm_a*arm_x[1]\n for i in range(len(obstacles)):\n cur_obs = obstacles[i]\n x_range = np.linspace(arm_x[0],arm_x[1],1000)\n y_range = arm_a * x_range + arm_b\n for j in range(1000):\n cur_x = x_range[j]\n cur_y = y_range[j]\n if(((cur_y-cur_obs[1])**2 +(cur_x-cur_obs[0])**2) <= cur_obs[2]**2):\n return True\n if (arm_x[0] == arm_x[1]):\n for i in range(len(obstacles)):\n cur_obs = obstacles[i]\n y_range = np.linspace(arm_y[0],arm_y[1],1000)\n cur_x = arm_x[0]\n for j in range(1000):\n cur_y = y_range[j]\n if(((cur_y-cur_obs[1])**2 +(cur_x-cur_obs[0])**2) <= cur_obs[2]**2):\n return True\n\n\n #print(obstacles)\n\n return False",
"def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return abs(v1.angle - v2.angle) < e",
"def intersects(self):\n match = False\n for i in range(len(self.__points) - 1):\n p1 = self.__points[i]\n p2 = self.__points[i + 1]\n bounds = self.__line_segment(p1, p2)\n if not bounds is None:\n xmin = bounds[0]\n ymin = bounds[1]\n xmax = bounds[0]\n ymax = bounds[1]\n for j in range(len(bounds)):\n if not (j % 2):\n if bounds[j] < xmin:\n xmin = bounds[j]\n elif bounds[j] > xmax:\n xmax = bounds[j]\n else:\n if bounds[j] < ymin:\n ymin = bounds[j]\n elif bounds[j] > ymax:\n ymax = bounds[j]\n x = self.x\n y = self.y\n # TODO: Determine direction, and check two leading edge points; ie. last vector ----> then points are x+width,y+width x+width,y-width\n if x > xmin and x < xmax and y > ymin and y < ymax:\n match = True\n break\n return match",
"def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2",
"def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)",
"def inside(self, uv):\n result = self._trimmed.Perform(gp_Pnt2d(uv[0], uv[1]))\n return result == TopAbs_IN",
"def is_inside(inner_path, outer_path):\r\n if not hasattr(inner_path, 'bounding_box'):\r\n inner_path.bounding_box = CutPlanner.bounding_box(inner_path)\r\n if not hasattr(outer_path, 'bounding_box'):\r\n outer_path.bounding_box = CutPlanner.bounding_box(outer_path)\r\n if outer_path.bounding_box[0] > inner_path.bounding_box[0]:\r\n # outer minx > inner minx (is not contained)\r\n return False\r\n if outer_path.bounding_box[1] > inner_path.bounding_box[1]:\r\n # outer miny > inner miny (is not contained)\r\n return False\r\n if outer_path.bounding_box[2] < inner_path.bounding_box[2]:\r\n # outer maxx < inner maxx (is not contained)\r\n return False\r\n if outer_path.bounding_box[3] < inner_path.bounding_box[3]:\r\n # outer maxy < inner maxy (is not contained)\r\n return False\r\n if outer_path.bounding_box == inner_path.bounding_box:\r\n if outer_path == inner_path: # This is the same object.\r\n return False\r\n if not hasattr(outer_path, 'vm'):\r\n outer_path = Polygon([outer_path.point(i / 100.0, error=1e4) for i in range(101)])\r\n vm = VectorMontonizer()\r\n vm.add_cluster(outer_path)\r\n outer_path.vm = vm\r\n for i in range(101):\r\n p = inner_path.point(i / 100.0, error=1e4)\r\n if not outer_path.vm.is_point_inside(p.x, p.y):\r\n return False\r\n return True",
"def is_obstacle(self, pos: tuple):\n if self.within_map(pos):\n return self.map[round(pos[0]), round(pos[1])] == OBSTACLE\n else:\n return False",
"def __check_obstacle_intersections(self, goal):\n # generate a proximity test geometry for the goal\n min_clearance = self.cfg[\"goal\"][\"min_clearance\"]\n n = 6 # goal is n sided polygon\n goal_test_geometry = []\n for i in range(n):\n goal_test_geometry.append(\n [goal[0] + min_clearance * cos(i * 2 * pi / n),\n goal[1] + min_clearance * sin(i * 2 * pi / n)])\n goal_test_geometry = Polygon(goal_test_geometry)\n intersects = False\n for obstacle in self.current_obstacles:\n intersects |= geometrics.convex_polygon_intersect_test(goal_test_geometry, obstacle.global_geometry)\n return intersects",
"def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False",
"def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def has_intersection(self, obj):\r\n obj_x, obj_y = obj.get_location()\r\n x = self.__x\r\n y = self.__y\r\n # Distance formula\r\n distance = sqrt((obj_x - x) ** 2 + (obj_y - y) ** 2)\r\n if distance <= obj.get_radius() + self.__radius:\r\n return True\r\n return False",
"def contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_nonneg( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n else:\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def isInPlane(self, p) -> bool:\n # Testing for zero is done with math.isclose, to avoid rounding/floating point errors.\n # Since we are testing near zero, abs_tol is set to 1e-09\n return math.isclose(\n math.fabs(\n dot(\n self.normal(),\n Vector.connect(p.x, p.y, p.z, self.p0.x, self.p0.y, self.p0.z),\n )\n ),\n 0,\n rel_tol=1e-09,\n abs_tol=1e-09,\n )"
] | [
"0.7454063",
"0.7005391",
"0.7005391",
"0.69903654",
"0.6773237",
"0.66110426",
"0.6502717",
"0.6439229",
"0.64051235",
"0.6336705",
"0.6297145",
"0.6216562",
"0.621584",
"0.6154906",
"0.61373097",
"0.6084647",
"0.6082654",
"0.6072066",
"0.60683346",
"0.60457075",
"0.6034389",
"0.60234964",
"0.60185206",
"0.5999616",
"0.5978637",
"0.5978229",
"0.59609795",
"0.59457964",
"0.5936145",
"0.59358144"
] | 0.7407062 | 1 |
Looks at the signs of the vector components to determine whether the obstacle lies in the same direction (quadrant) as the waypoint | def is_obstacle_in_path_of_drone(self, obstacle_vector, waypoint_vector):
obstacle_list = obstacle_vector.tolist()
waypoint_list = waypoint_vector.tolist()
for index in range(len(obstacle_list)):
if all(item > 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]) or all(item < 0 for item in [-1.0 * obstacle_list[index], waypoint_vector[index]]):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_direction(self, vector, coordinate):\n inverse_vector = -vector[0], -vector[1]\n # Calculate hits to direction\n hits = self.__direction(vector,1,coordinate)\n if hits == 5:\n return True\n # After reaching the end, add hits towards the opposite direction\n hits = self.__direction(inverse_vector,hits,coordinate)\n if hits == 5:\n return True",
"def does_path_intersect_obstacle_2d(self, obstacle, uav_point, waypoint):\n drone_point = uav_point[:-1]\n waypoint = waypoint[:-1]\n obstacle_point = obstacle.get_point()[:-1]\n\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle_point, drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(uav_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < obstacle.get_radius()\n\n return False",
"def check_position(self, player):\n\n # Mid point of the segment defining the goal\n mid = Point.mid_point(self.s_pos, self.e_pos)\n\n # Transposition of this point by the direction vector of the goal\n # to get the direction vector with its origin in the center of the goal\n mid_prime = self.dir + mid\n\n # Creating both needed vectors\n v1 = Vector.v_from_pp(mid, player.pos)\n v2 = Vector.v_from_pp(mid, mid_prime)\n\n # Getting the angle and checking if it is a valid one\n angle = v1.angle(v2)\n\n return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)",
"def sameDirection(cls, *vectors, e=10e-10):\n l = len(vectors)\n if l == 2:\n v1 = vectors[0]\n v2 = vectors[1]\n return (abs(v1.angle - v2.angle) % (2 * math.pi)) < e\n else:\n for i in range(l):\n for j in range(i + 1, l):\n if not cls.sameDirection(vectors[i], vectors[j]):\n return False\n return True",
"def does_path_intersect_obstacle_3d(self, obstacle, drone_point, waypoint):\n waypoint_vector = np.subtract(waypoint, drone_point)\n obstacle_vector = np.subtract(obstacle.get_point(), drone_point)\n obstacle_vector_magnitude = VectorMath.get_vector_magnitude(obstacle_vector)\n rejection_vector = VectorMath.get_vector_rejection(obstacle_vector, waypoint_vector)\n rejection_vector_magnitude = VectorMath.get_vector_magnitude(rejection_vector)\n\n # Uncomment for DEBUGGING ONLY\n print(\"Waypoint Vector: \" + str(waypoint_vector))\n print(\"Obstacle Vector: \" + str(obstacle_vector))\n print(\"Rejection Vector: \" + str(rejection_vector))\n print(\"Rejection Vector Magnitude: \" + str(rejection_vector_magnitude))\n print(\"Obstacle Radius: \" + str(obstacle.get_radius()))\n print(\"Distance From Obstacle: \" + str(VectorMath.get_vector_magnitude(np.subtract(drone_point, obstacle.get_point()))))\n\n if self.is_obstacle_in_path_of_drone(obstacle_vector, waypoint_vector):\n return rejection_vector_magnitude < Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS\n\n return False",
"def is_solved(self):\n return self.to_grid == self.from_grid",
"def is_solved(self):\n return (self.from_grid == self.to_grid)",
"def checkDirection(neighbour, current_point, end):\n\n for i in range(3):\n delta = abs(end[i] - current_point[i])\n if abs(end[i] - neighbour[i]) < delta and delta >= 0:\n return True, i\n\n return False, None",
"def is_solved(self):\n return self.from_grid == self.to_grid",
"def is_solved(self):\n return self.from_grid == self.to_grid",
"def is_solved(self):\n return self.from_grid == self.to_grid",
"def is_same_waypoint(self, wp1, wp2, max_d=0.5, max_v=0.5):\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n ddif = dl(wp1.pose.pose.position, wp2.pose.pose.position)\n if ddif < max_d:\n return True\n return False",
"def _hasChangedDirection(motionPts: list) -> tuple:\n dispPts = Ball._getDisplacements(motionPts)\n xDir = yDir = None\n xChange = yChange = False\n for dispPt in dispPts:\n # Compute differences\n xDirNow = RIGHT if dispPt[0] > 0 else LEFT\n yDirNow = DOWN if dispPt[1] > 0 else UP\n # Look for x changes\n if xDir is None:\n xDir = xDirNow\n elif xDirNow != xDir:\n xChange = True\n # Look for y changes\n if yDir is None:\n yDir = yDirNow\n elif yDirNow != yDir:\n yChange = True\n return xChange, yChange",
"def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None",
"def vector_equal(v1,v2):\n if (v2.x - 0.001 <= v1.x <= v2.x + 0.001) and \\\n (v2.y - 0.001 <= v1.y <= v2.y + 0.001) and \\\n (v2.z - 0.001 <= v1.z <= v2.z + 0.001):\n return True",
"def _point_in_tri(self, pos, tri):\n signs = np.sign([np.cross(tri[np.mod(i + 1, 3)] - tri[i],\n pos - tri[i]) for i in range(3)])\n if np.all(signs[1:] == signs[0]):\n return True\n else:\n return False",
"def check_shot_direction(self, shot):\n return Vector.v_from_a(shot.angle) * self.dir < 0",
"def R_will_change_direction(point0, point1, point2):\n\n x0, y0 = point0[0], point0[1]\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n try:\n m1 = (x1 - x2) / (y2 - y1)\n m2 = (y2 - y1) / (x2 - x1)\n x3 = ((m2 * x1) - (m1 * x0) - y1 + y0) / (m2 - m1)\n y3 = m1 * (x3 - x0) + y0\n except ZeroDivisionError:\n (x3, y3) = (x0, y1) if y1 == y2 else (x1, y0)\n\n return ((min(x1, x2) <= x3 <= max(x1, x2)) and (min(y1, y2) <= y3 <= max(y1, y2))), (x3, y3)",
"def reached_final_point():\n return all(point.constraints[b.atom_indexes] == b.final_dist\n for b in self.bonds)",
"def condition(o):\n\t\t\tv = o.pos() - self.pos()\n\t\t\treturn v.norm2() < dist2 and abs(angle_diff(v.angle(),self.angle())) < math.radians(45)",
"def isInPlane(self, p) -> bool:\n # Testing for zero is done with math.isclose, to avoid rounding/floating point errors.\n # Since we are testing near zero, abs_tol is set to 1e-09\n return math.isclose(\n math.fabs(\n dot(\n self.normal(),\n Vector.connect(p.x, p.y, p.z, self.p0.x, self.p0.y, self.p0.z),\n )\n ),\n 0,\n rel_tol=1e-09,\n abs_tol=1e-09,\n )",
"def is_straight(distance_travel_x, distance_travel_y):\r\n if (distance_travel_x > 0 and distance_travel_y == 0) or (distance_travel_x == 0 and distance_travel_y > 0):\r\n return True\r\n else:\r\n return False",
"def is_perpendicular_to(self, vector):\n\n if abs(self.dot(vector)) < 0.01:\n return True\n return False",
"def check_directionality_viable(self):\n\n direction_viable = True\n nose_cords, ear_left_cords, ear_right_cords = [], [], []\n for animal_name in self.animal_bp_dict.keys():\n for bp_cord in [\"X_bps\", \"Y_bps\"]:\n bp_list = self.animal_bp_dict[animal_name][bp_cord]\n for bp_name in bp_list:\n bp_name_components = bp_name.split(\"_\")\n bp_name_components = [x.lower() for x in bp_name_components]\n if \"nose\" in bp_name_components:\n nose_cords.append(bp_name)\n elif (\"ear\" in bp_name_components) and (\n \"left\" in bp_name_components\n ):\n ear_left_cords.append(bp_name)\n elif (\"ear\" in bp_name_components) and (\n \"right\" in bp_name_components\n ):\n ear_right_cords.append(bp_name)\n else:\n pass\n\n for cord in [nose_cords, ear_left_cords, ear_right_cords]:\n if len(cord) != len(self.animal_bp_dict.keys()) * 2:\n direction_viable = False\n\n if direction_viable:\n nose_cords = [\n nose_cords[i * 2 : (i + 1) * 2]\n for i in range((len(nose_cords) + 2 - 1) // 2)\n ]\n ear_left_cords = [\n ear_left_cords[i * 2 : (i + 1) * 2]\n for i in range((len(ear_left_cords) + 2 - 1) // 2)\n ]\n ear_right_cords = [\n ear_right_cords[i * 2 : (i + 1) * 2]\n for i in range((len(ear_right_cords) + 2 - 1) // 2)\n ]\n\n return direction_viable, nose_cords, ear_left_cords, ear_right_cords",
"def does_uav_intersect_obstacle_vertically(self, obstacle, drone_point, waypoint):\n if isinstance(obstacle, StationaryObstacle):\n if drone_point[2] < obstacle.height + Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS:\n return True\n\n return False",
"def in_field(self, vec):\n return (abs(vec[0]) + abs(vec[1]) + abs(vec[2])) <= 2 * self.n",
"def near_way(self):\r\n\r\n prey_position = np.array(self.prey.position)\r\n actual_position = np.array(self.previous_data[-1])\r\n previous_position = np.array(self.previous_data[-2])\r\n\r\n difference_actual = np.linalg.norm(prey_position - actual_position)\r\n difference_previous = np.linalg.norm(prey_position - previous_position)\r\n\r\n if difference_actual < difference_previous:\r\n return True\r\n else:\r\n return False",
"def IsPointInsideMesh(MeshObj, PointInObjectSpace):\n #direction is irellevant unless mesh is REALLY wierd shaped\n direction = mathutils.Vector((1,0,0)) \n epsilon = direction * 1e-6 \n count = 0 \n result, PointInObjectSpace, normal, index = MeshObj.ray_cast(PointInObjectSpace, direction) \n while result: \n count += 1 \n result, PointInObjectSpace, normal, index = MeshObj.ray_cast(PointInObjectSpace + epsilon, direction) \n return (count % 2) == 1",
"def arrived(self):\n \"\"\" Responsible for transformations \"\"\"\n \n if self.phase == 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, array([0,0]))\n else: \n return array_equal(self.destination, array([0,0]))\n elif self.phase > 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, self.position)\n else: \n return array_equal(self.destination, self.position)",
"def are_torsions_same2(geo, geoi, idxs_lst):\n dtol = 0.09\n same_dihed = True\n for idxs in idxs_lst:\n val = dihedral_angle(geo, *idxs)\n vali = dihedral_angle(geoi, *idxs)\n valip = vali+2.*numpy.pi\n valim = vali-2.*numpy.pi\n vchk1 = abs(val - vali)\n vchk2 = abs(val - valip)\n vchk3 = abs(val - valim)\n if vchk1 > dtol and vchk2 > dtol and vchk3 > dtol:\n same_dihed = False\n return same_dihed"
] | [
"0.7193433",
"0.65259945",
"0.6495801",
"0.6493202",
"0.6424737",
"0.6406378",
"0.640443",
"0.64031774",
"0.6398309",
"0.6398309",
"0.6398309",
"0.62759835",
"0.6256729",
"0.62326354",
"0.6192946",
"0.6183871",
"0.6148293",
"0.61077344",
"0.6098693",
"0.6095345",
"0.60564905",
"0.6047996",
"0.6025313",
"0.60187346",
"0.5989811",
"0.59807545",
"0.5971254",
"0.5966417",
"0.59474695",
"0.5942117"
] | 0.72141993 | 0 |
Return the shortest path from the paths provided. This function assumes that the paths are possible waypoints calculated from the is_obstacle_in_path() function | def get_min_path(self, paths):
shortest_path = paths[0]
shortest_distance = self.get_path_distance(paths[0])
for path in paths[1:]:
distance = self.get_path_distance(path)
if distance < shortest_distance:
shortest_path = path
shortest_distance = distance
return shortest_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shortest_path(env, service, paths):\n for idp, path in enumerate(paths):\n if is_path_free(env.topology, path, service.number_units):\n return True, idp\n return False, env.k_paths # returns false and an index out of bounds if no path is available",
"def constructShortestPath(self):\r\n sp = []\r\n v = self.t\r\n while self.preds[v]: # is not None\r\n sp.append(v)\r\n v = self.preds[v]\r\n sp.append(self.s) # source\r\n sp.reverse() # to have the path from source to dest and not t to s\r\n return sp, self.graph.getCoords(sp)",
"def shortest_path(self, start: str, goal: str) -> Path:\n return next(self.bfs_paths(start, goal), [])",
"def shortest_path(self, source, destination, parameter=None):\n paths = []\n for path in self.graph.shortest_paths(source, destination, parameter):\n paths.append({'hops': path})\n return jsonify({'paths': paths})",
"def shortestPathsInLatency (cls, G, return_paths=False,\n id_connector_character='&'):\n exploded_G = NFFGToolBox.explodeGraphWithPortnodes(G,\n id_connector_character)\n\n exploded_dists = networkx.all_pairs_dijkstra_path_length(exploded_G,\n weight='delay')\n dists, min_dist_pairs = NFFGToolBox.extractDistsFromExploded(G,\n exploded_dists,\n id_connector_character)\n\n if return_paths:\n exploded_paths = networkx.all_pairs_dijkstra_path(exploded_G,\n weight='delay')\n paths = NFFGToolBox.extractPathsFromExploded(exploded_paths,\n min_dist_pairs,\n id_connector_character)\n return paths, dists\n else:\n return dists",
"def get_shortest_as_path(self, routes) -> List[str]:\n if len(routes) <= 0:\n return []\n # start shortest path as the first route's path\n shortest_path = [routes[0]]\n # start the length of the shortest path as that\n # of the first route's path\n min_path = len(routes[0][APTH])\n # iterate through all routes in given list and\n # find the shortest AS Path\n for route in routes:\n r_len = len(route[APTH])\n if r_len < min_path:\n min_path = r_len\n shortest_path = [route]\n elif r_len == min_path:\n shortest_path.append(route)\n return shortest_path",
"def find_shortest_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n shortest_path = []\n for node in self.graph[start]:\n if node not in path:\n newpath = self.find_path(node, end, path)\n if not shortest_path or len(shortest_path) > len(newpath):\n shortest_path = newpath\n return shortest_path if shortest_path else None",
"def shortest_path(N, a_0, a_1=None):\n path = HJ_path(a_1*N, a_0*N)\n path = [c/d/N for c, d in path]\n return path",
"def shortest_path(self, source, target, via=None, weight='length', bbox=None):\n\n if self._graph_backend == 'networkx':\n return networkx.shortest_path(self._graph, source, target, weight=weight)\n else:\n if isinstance(via, list):\n return self._pgr.get_route(source, target, via_nodes=via, bbox_nodes=bbox)\n else:\n return self._pgr.get_route(source, target)",
"def shortest_path(graph, source, target):\n return shortest_path_recursive(graph, source, target, set())",
"def shortest_path_search(start, successors, is_goal):\r\n if is_goal(start):\r\n return [start]\r\n\r\n explored = set() # set of states we have visited\r\n frontier = [ [start] ] # ordered list of paths we have blazed\r\n while frontier:\r\n path = frontier.pop(0)\r\n s = path[-1]\r\n for (state, action) in successors(s).items():\r\n if state not in explored:\r\n explored.add(state)\r\n path2 = path + [action, state]\r\n if is_goal(state):\r\n return path2\r\n else:\r\n frontier.append(path2)\r\n return Fail",
"def dijkstra_shortest_path(grid_obs, source, dest):\n #------------------------------------\n #\n # Fill and submit this code\n #\n predecessors = {source: float('inf')}\n visited_blocks = {source: 0}\n queue = PQ()\n queue.__setitem__(source, 0)\n goodIndices = []\n\n print len(grid_obs)\n\n for index in range(len(grid_obs)):\n if grid_obs[index] != \"air\":\n goodIndices.append(index)\n\n for index in goodIndices:\n if index != source:\n visited_blocks[index] = float('inf')\n\n while queue:\n blocks_to_go = []\n current_position = queue.smallest()\n del queue[current_position]\n\n for difference in [-81, -1, 1, 81]:\n if (current_position + difference) in goodIndices:\n blocks_to_go.append(current_position + difference)\n\n for block_Index in blocks_to_go:\n gap = visited_blocks[current_position] + 1\n if gap < visited_blocks[block_Index]:\n visited_blocks[block_Index] = gap\n predecessors[block_Index] = current_position\n queue.__setitem__(block_Index, gap)\n\n shortest_paths = []\n while dest != source:\n shortest_paths.append(dest)\n dest = predecessors[dest]\n shortest_paths.append(source)\n shortest_paths.reverse()\n\n return shortest_paths\n #-------------------------------------",
"def get_shortest_as_path(self, routes):\n outroutes = []\n min_val = float('inf');\n # get shortest AS path first\n for r in routes:\n if len(r[MESG][APTH]) < min_val:\n min_val = len(r[MESG][APTH])\n # find all routes with that val\n for r in routes:\n if len(r[MESG][APTH]) == min_val:\n outroutes.append(r)\n\n return outroutes",
"def _shortest_path(G, start, end, sp_cache):\n if (start, end) in SP_TABLE:\n return sp_cache[(start, end)]\n elif (end, start) in SP_TABLE:\n return sp_cache[(end, start)]\n else:\n D, P = _dijkstra(G, start, end)\n path = []\n temp = end\n while 1:\n path.append(end)\n if end == start: break\n end = P[end]\n path.reverse()\n sp_cache[(start, temp)] = path\n return path",
"def shortest_path(M, start, goal):\n\n print(\"shortest path called\")\n\n came_from = {}\n g_score = {}\n came_from[start] = None\n g_score[start] = 0\n open_heap = []\n heappush(open_heap, (0, start))\n\n while open_heap:\n current = heappop(open_heap)[1]\n\n if current == goal:\n break\n\n for neighbor in M.roads[current]:\n new_g_score = g_score[current] + heuristic(M.intersections[current], M.intersections[neighbor])\n\n if neighbor not in g_score or new_g_score < g_score[neighbor]:\n came_from[neighbor] = current\n g_score[neighbor] = new_g_score\n heappush(open_heap, (new_g_score, neighbor))\n\n optimal_path = []\n node = goal\n\n while came_from[node]:\n optimal_path.append(node)\n node = came_from[node]\n else:\n optimal_path.append(node)\n\n optimal_path.reverse()\n\n return optimal_path",
"def get_shortest_as_path(self, routes):\n # filter out any routes that don't have the shortest AS path\n outroutes = routes.copy()\n outroutes.sort(key=lambda r: len(r[MESG][APTH]))\n lowest = len(outroutes[0][MESG][APTH])\n outroutes = list(filter(lambda r: len(r[MESG][APTH]) == lowest, outroutes))\n return outroutes",
"def shortestPath( self, source, target, weight = None ):\n if weight == None:\n return nx.shortest_path(self._G, source, target)\n else:\n return nx.shortest_path(self._G, source, target, weight = weight)",
"def get_shortest(args_array):\n\n node, G, paths_list = args_array\n shortest_score = float(\"inf\")\n path = None\n for pred in G.predecessors(node):\n try:\n path_len,shortest_path = nx.bidirectional_dijkstra(G, node, pred, weight='cost')\n if path_len < shortest_score:\n path = shortest_path\n shortest_score = path_len\n except nx.exception.NetworkXNoPath:\n continue\n if path is not None: paths_list.append(path)\n # done",
"def get_closest_distance_to_path(self, path):\n min_distance_to_line = float(\"inf\")\n for p in path:\n game_path = p[:]\n\n game_path.sort(key = lambda coord: calculate_distance(self, coord))\n point_A = game_path[0] # Closest point out of all the points on the path to to the tower\n\n try:\n point_after_A = p[p.index(point_A) + 1]\n point_before_A = p[p.index(point_A) - 1]\n closest_to_A = min(point_after_A, point_before_A, key = lambda point: calculate_distance(point_A, point))\n except:\n if p.index(point_A) == 0:\n closest_to_A = p[p.index(point_A) + 1]\n \n elif p.index(point_A) == len(p) - 1:\n closest_to_A = p[p.index(point_A) - 1]\n finally:\n if closest_to_A[0] != point_A[0]:\n m = (closest_to_A[1] - point_A[1]) / (closest_to_A[0] - point_A[0])\n else:\n m = 2\n\n b = point_A[1] - m * point_A[0]\n\n closest_distance = abs(-m * self.x + self.y - b) / math.sqrt((-m) ** 2 + 1)\n min_distance_to_line = min(closest_distance, min_distance_to_line)\n \n return min_distance_to_line",
"def bfs_shortest_path(graph: dict=g2, start: str = \"1\", goal: str = \"4\") -> list:\n visited = []\n queue = [[start]]\n\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node not in visited:\n neighbours = graph[node]\n for neighbour in neighbours:\n new_path = path[:]\n new_path.append(neighbour)\n queue.append(new_path)\n if neighbour == goal:\n return new_path\n visited.append(node)\n # No path\n return [\"No Path\"]",
"def shortest_path(source, target):\n #although lecture checks for goal when a node is popped off the frontier, efficiency of search can be improved\n #by checking for a goal as nodes are ADDED. If goal detected, don't add it to frontier, just return the solution\n #immediately\n\n #create start point\n start = Node(state = source, parent = None, action = None)\n frontier = QueueFrontier()\n frontier.add(start)\n\n #create explored set\n explored = set()\n\n while True:\n #if nothing left in frontier, no path exists\n if frontier.empty():\n return None\n\n #choose a node from the frontier\n node = frontier.remove()\n #if node is goal, we have solution\n\n #add neighbors 2 frontier using function THATS ALR THERE DUMMY\n for (movie, star) in neighbors_for_person(node.state):\n newNode = Node(state = star, parent = node, action=movie)\n if not frontier.contains_state(newNode) and newNode.state not in explored:\n if newNode.state == target:\n #reverse the solution\n solution = []\n while newNode.parent is not None:\n actionTuple = (newNode.action, newNode.state)\n solution.append(actionTuple)\n newNode = newNode.parent\n solution.reverse()\n return solution\n else: frontier.add(newNode)\n\n #mark state as explored\n explored.add(node.state)",
"def get_attack_path(targets, map_, y, x):\n target_path = {}\n for t in targets:\n adjacent = map_.find_adjacent_open_squares(t.y, t.x)\n paths = []\n for (dy, dx) in adjacent:\n path = map_.bfs(y, x, dy, dx)\n if path is not None:\n paths.append(path)\n if not paths:\n continue\n target_path[dy, dx] = (t, min(paths, key=len))\n if not target_path:\n return None, None\n min_len = min([len(p[1]) for p in target_path.values()])\n min_paths = {k: v for (k, v) in target_path.items() if len(v[1]) == min_len}\n for k, v in sorted(min_paths.items()):\n return v[1][0]",
"def shortest_path(self):\n\t\t#dict that will hold the cost of traveling to each station\n\t\t#add the initial cost of the starting station, which is 0\n\t\tD = {0:0}\n\n\t\t#add all of our dict keys (stations) to our queue\n\t\tstation_queue = self.station_graph.keys()\n\n\t\t#sort the keys! since the graph is directed and acyclic, the stations\n\t\t#can be explored one at a time, in order, without having to adjust\n\t\t#for the lowest distance value via priority queue.\n\t\t#\n\t\t#sort them with reverse=True so that they can be popped from the\n\t\t#end of the list instead of from the beginning. This should save\n\t\t#some cpu time.\n\t\tstation_queue.sort(reverse=True)\n\t\twhile len(station_queue) > 0:\n\n\t\t\tstation = station_queue.pop() #grab the next node in the queue\n\n\t\t\tfor next_st, next_cost in self.station_graph[station].iteritems():\n\t\t\t\t#loops through the current station's neighbors, and calculates\n\t\t\t\t#their costs from the starting node, making sure to store\n\t\t\t\t#the lowest cost in our D dict\n\t\t\t\talt = D[station] + next_cost #sum the costs\n\t\t\t\tif not D.has_key(next_st) or alt < D[next_st]:\n\t\t\t\t\t#if there is no cost on record, or if the newly calculated\n\t\t\t\t\t#cost is lower than the currently recorded one, then\n\t\t\t\t\t#record the newly calculated cost as the lowest\n\t\t\t\t\tD[next_st] = alt #set the cost to get to next_st\n\n\t\treturn D[self.final_stop]",
"def shortest_path_search(start, successors, is_goal):\n if is_goal(start): return [start]\n explored = set()\n frontier = [[start]]\n while frontier:\n path = frontier.pop(0)\n s = path[-1]\n for (state,action) in successors(s).items():\n if state not in explored:\n explored.add(state)\n npath = path + [action,state]\n if is_goal(state): return npath\n else: frontier.append(npath)\n return []",
"def shortest_path_search(start, successors, is_goal):\r\n if is_goal(start):\r\n return [start]\r\n explored = set()\r\n frontier = [ [start] ]\r\n while frontier:\r\n path = frontier.pop(0)\r\n s = path[-1]\r\n for (state, action) in successors(s).items():\r\n if state not in explored:\r\n explored.add(state)\r\n path2 = path + [action, state]\r\n if is_goal(state):\r\n return path2\r\n else:\r\n frontier.append(path2)\r\n return Fail",
"def shortest_path_search(start, successors, is_goal):\n if is_goal(start):\n return [start]\n explored = set()\n frontier = [ [start] ] \n while frontier:\n path = frontier.pop(0)\n s = path[-1]\n for (state, action) in successors(s).items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if is_goal(state):\n return path2\n else:\n frontier.append(path2)\n return Fail",
"def FindShortestPath(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if start not in graph:\n return None\n shortest = None\n for node in graph[start]:\n if node not in path:\n newpath = FindShortestPath(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest",
"def build_shortest_path(self):\n # Explore paths until all nodes have been visited\n while self.nodes_heap:\n # Grab min distance node\n current_node = heappop(self.nodes_heap)\n # Grab each valid neighbor and update distance if shorter path is found\n for node_label, edge_weight in enumerate(current_node.edges):\n if edge_weight and node_label in self.nodes_heap:\n neighbor = self.nodes_heap[self.nodes_heap.index(node_label)]\n if neighbor.distance > (current_node.distance + edge_weight):\n neighbor.distance = current_node.distance + edge_weight\n neighbor.previous = current_node\n # Make sure our min heap invariant is met before continuing\n heapify(self.nodes_heap)",
"def min_path(self, start, end, maxD=1e309):\n tdist, preceding_node = self.dijkstra(start, maxD)\n dist = tdist[end]\n backpath = [end]\n try:\n while end != start:\n end = preceding_node[end]\n backpath.append(end)\n path = list(reversed(backpath))\n except KeyError:\n path = None\n\n return dist, path",
"def find_shortest_path(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if not graph.has_key(start):\n return None\n shortest = None\n for node in graph[start]:\n if node not in path:\n newpath = find_shortest_path(graph, node, end, path)\n if newpath:\n if not shortest or len(newpath) < len(shortest):\n shortest = newpath\n return shortest"
] | [
"0.7273277",
"0.70496786",
"0.69742703",
"0.6943726",
"0.68671346",
"0.6855859",
"0.68451226",
"0.68320864",
"0.6805442",
"0.67743856",
"0.6726081",
"0.6716487",
"0.6714552",
"0.670571",
"0.669246",
"0.66918075",
"0.665236",
"0.66394556",
"0.6638905",
"0.6598199",
"0.6593292",
"0.659285",
"0.657804",
"0.65729445",
"0.65665996",
"0.65652406",
"0.6552652",
"0.65393376",
"0.6528136",
"0.65234226"
] | 0.81187457 | 0 |
Return the obstacles in the map | def get_obstacles(self):
return self.obstacles | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getObstacles(self):\r\n ausgabeObstacle = self.globalObstaclesList + self.globalHardObstaclesList\r\n self.globalObstaclesList = []\r\n return(ausgabeObstacle)",
"def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, 
snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight",
"def get_map(self) -> list:\n return self.map_obstacle",
"def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)",
"def get_obstacles(self, map_server):\n\n self.obstacle_list = []\n for index, element in enumerate(map_server):\n if element > 0:\n self.obstacle_list.append(index)\n return(self.obstacle_list)",
"def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects",
"def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location - 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays",
"def get_obstacles(image):\n\n ih, iw = image.shape[:2]\n image_copy = image.copy()\n\n #resize the image to the size of arena\n image = cv2.resize(image, ARENA_SIZE, interpolation=cv2.INTER_CUBIC)\n gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n\n #replace all black pixels to white pixels\n gray[np.where(gray == 0)]= 255\n\n #get the thresholded binary image\n ret,threshold = cv2.threshold(gray,200,255,cv2.THRESH_BINARY_INV)\n\n #find all the countours in the binary image\n _, contours, heiarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n cont = []\n\n #create a mask to draw contours on\n blocks = mask = np.zeros(threshold.shape[:2], np.uint8)\n\n #create a dictionary to hold image roi of all puzzle peices\n blocks_roi = {}\n\n #iterate through all contours\n for i, c in enumerate(contours[1:]):\n\n #find the minimum area fitting rectangle of the contour\n rect = cv2.minAreaRect(c)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n #create the copy of the mask\n mask_copy = mask.copy()\n\n #draw the rectangle on the mask\n cv2.drawContours(mask_copy, [box], -1, (255,255,255), 3)\n\n #floodfill the rectangle\n cv2.floodFill(mask_copy, None, (0,0), 255)\n mask_inv = cv2.bitwise_not(mask_copy)\n blocks = cv2.add(blocks, mask_inv)\n\n _, contours, heiarchy = cv2.findContours(blocks, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n\n obstacles = {}\n\n for c in contours:\n x,y,w,h = cv2.boundingRect(c)\n obstacles.update({(int(x+w/2), int(y+h/2)): BLOCK_SIZE})\n #obstacles.update({(int(x+w/2), int(y+h/2)): (w, h)}) # for unknown block sizes\n bottom_r = remap((x+w, y+h), ARENA_SIZE, (iw,ih))\n top_l = remap((x, y), ARENA_SIZE, (iw,ih))\n blocks_roi.update({(int(x+w/2), int(y+h/2)): image_copy[top_l[1]:bottom_r[1], top_l[0]:bottom_r[0]]})\n\n return obstacles, blocks_roi",
"def obstacles(p):\n c1 = np.array([-0.5,-1.])\n r1 = 1.\n c2 = np.array([0.75,0.5])\n r2 = 0.5\n return [\n (p[0] + 2, np.array([1.,0.])), # left\n (2 - p[0], np.array([-1.,0.])), # right\n (p[1] + 1, np.array([0.,1.])), # bottom\n (1 - p[1], np.array([0.,-1.])), # top\n (norm(p - c1) - r1, (p - c1)/norm(p - c1)), # circle 1\n (norm(p - c2) - r2, (p - c2)/norm(p - c2)) # circle 2\n ]",
"def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)",
"def draw_obstacles(self):\n for obstacle in self.obstacles:\n obstacle.draw(self.window, Colors.BLACK.value)",
"def _find_obstacle(self, obstacle_type='*traffic_light*'): \r\n obst = list()\r\n \r\n _actors = self._world.get_actors()\r\n _obstacles = _actors.filter(obstacle_type)\r\n\r\n\r\n for _obstacle in _obstacles:\r\n trigger = _obstacle.trigger_volume\r\n\r\n _obstacle.get_transform().transform(trigger.location)\r\n \r\n distance_to_car = trigger.location.distance(self._vehicle.get_location())\r\n\r\n a = np.sqrt(\r\n trigger.extent.x ** 2 +\r\n trigger.extent.y ** 2 +\r\n trigger.extent.z ** 2)\r\n b = np.sqrt(\r\n self._vehicle.bounding_box.extent.x ** 2 +\r\n self._vehicle.bounding_box.extent.y ** 2 +\r\n self._vehicle.bounding_box.extent.z ** 2)\r\n\r\n s = a + b + 10\r\n \r\n if distance_to_car <= s:\r\n # the actor is affected by this obstacle.\r\n obst.append(_obstacle)\r\n\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(_obstacle.get_transform().location, carla.Vector3D(0.5,0.5,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,255,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,10)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \r\n \"\"\"self._debug.draw_box(carla.BoundingBox(trigger.location, carla.Vector3D(0.1,0.1,2)),\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n \"\"\"self._debug.draw_box(trigger,\r\n _obstacle.get_transform().rotation, \r\n 0.05, \r\n carla.Color(255,0,0,0),\r\n 0\r\n )\"\"\"\r\n\r\n return obst",
"def obstacle_iterator(self):\n for obstacle in self.tmx_data.get_layer_by_name(\"obstacles\"):\n yield obstacle",
"def create_obstacles(self) -> List[Square]:\n obstacles_number = random.randint(1, self.maximum_obstacles_on_board)\n obstacles = list()\n\n while len(obstacles) < obstacles_number:\n\n obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)\n obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)\n obstacle = Square(obstacle_x_pos, obstacle_y_pos)\n if obstacle not in obstacles:\n self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0\n obstacles.append(obstacle)\n\n return obstacles",
"def calcGlobalObstaclePosition(self, obstacles): \r\n global_obstacle_list = []\r\n for obstacle in obstacles: \r\n #Wandeln Winkeldaten für Globalberechnung: -90zu+90 und +90zu-90 0=0\r\n #ScanList[i][0]=degrees(asin(sin(radians(ScanList[i][0])+radians(180))))\r\n\r\n Dx = obstacle[0]\r\n Dy = obstacle[1]\r\n\r\n #Drehmatrix für X, Returns Global Hindernis Position\r\n X=(Dx*cos(radians(self.global_kurs))+Dy*(-sin(radians(self.global_kurs))))+self.RoboPosX\r\n #Drehmatrix für Y, Returns Global Hindernis Position\r\n Y=(Dx*sin(radians(self.global_kurs))+Dy*(cos(radians(self.global_kurs))))+self.RoboPosY\r\n\r\n global_obstacle_list.append([int(X),int(Y)])\r\n return(global_obstacle_list)",
"def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles",
"def get_near(self,map):\n near_cells = []\n for i in range(self.x-1, self.x+2):\n for j in range(self.y-1, self.y+2):\n if(i>=0 and i<map.size and j>=0 and j<map.size): near_cells.append(map.search(i,j))\n return near_cells",
"def find_open_tiles(self, arena, units):\r\n tiles = []\r\n for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:\r\n if arena[x][y] == '.':\r\n tiles.append((x, y))\r\n return tiles",
"def draw_obstacles():\n for obstacle in obstacles:\n plt.gca().add_patch(obstacle)",
"def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False",
"def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)",
"def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)",
"def __generate_octagon_obstacles(self, world):\n obs_radius = self.cfg[\"obstacle\"][\"octagon\"][\"radius\"]\n obs_min_count = self.cfg[\"obstacle\"][\"octagon\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"octagon\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"octagon\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"octagon\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = OctagonObstacle(obs_radius, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles",
"def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)",
"def obstacles_form(self,image):\r\n major_axis=60\r\n minor_axis=30\r\n c_y=246\r\n c_x=145\r\n c_y1=90\r\n c_x1=70\r\n radius=35\r\n for i in range(len(image)):\r\n for j in range(len(image[0])):\r\n\r\n #self.ellipse(image,major_axis,minor_axis,i,j,c_x,c_y)\r\n self.circle(image,100,i,j,200,200)\r\n self.circle(image,100,i,j,800,200)\r\n #self.slanted_rect(image,i,j)\r\n self.boundary(image,i,j)\r\n self.boundary1(image,i,j)\r\n self.boundary2(image,i,j)\r\n self.c_shape(image,i,j)\r\n #exploration.c_shape(image,i,j)\r",
"def detect_object(world):\n # create the map with only the obstucale to non-zero\n world_hsv = cv2.cvtColor(world, cv2.COLOR_BGR2HSV)\n mask_red = cv2.inRange(world_hsv, low_red, up_red)\n occupancy_grid = np.array(mask_red)\n world_rows, world_cols, _ = world.shape\n\n # create the mask in order to find the goal\n world_hsv = cv2.cvtColor(world, cv2.COLOR_BGR2HSV)\n mask_goal = cv2.inRange(world_hsv, low_blue, up_blue)\n goal_x, goal_y = (15, 15) # goal by default\n\n # look for the obstacle and increase there size\n for i in range(world_rows):\n for j in range(world_cols):\n occupancy_grid[i][j] = int(occupancy_grid[i][j] / 255)\n if mask_goal[i][j] > 200:\n goal_x, goal_y = (i, j)\n object_grid = [[goal_x, goal_y]]\n return object_grid, occupancy_grid",
"def generate_nearby_cells(self):\n for y in range(len(self.island_map)):\n for x in range(len(self.island_map[y])):\n list_of_nearby_cells = []\n\n if y != 0:\n self.generate_cell_above(x, y, list_of_nearby_cells)\n\n if x != 0:\n self.generate_cell_left(x, y, list_of_nearby_cells)\n\n if y != len(self.island_map)-1:\n self.generate_cell_below(x, y, list_of_nearby_cells)\n\n if x != len(self.island_map[y])-1:\n self.generate_cell_right(x, y, list_of_nearby_cells)\n\n self.island_map[y][x].nearby_cells = list_of_nearby_cells",
"def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)",
"def __generate_rectangle_obstacles(self, world):\n obs_min_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"min_dim\"]\n obs_max_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_dim\"]\n obs_max_combined_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_combined_dim\"]\n obs_min_count = self.cfg[\"obstacle\"][\"rectangle\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"rectangle\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dim_range = obs_max_dim - obs_min_dim\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n # generate dimensions\n width = obs_min_dim + (random() * obs_dim_range )\n height = obs_min_dim + (random() * obs_dim_range )\n while width + height > obs_max_combined_dim:\n height = obs_min_dim + (random() * obs_dim_range )\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = RectangleObstacle(width, height, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles",
"def through_obstacle(line, obstacles):\r\n noofpoints = 20\r\n for i in range(noofpoints):\r\n if inside_obstacle((line[0]+(i*(line[2]-line[0])/noofpoints), line[1]+(i*(line[3]-line[1])/noofpoints)), obstacles) == 1:\r\n return 1\r\n return 0"
] | [
"0.7732808",
"0.76378906",
"0.7601713",
"0.7597489",
"0.74278224",
"0.7182941",
"0.70999545",
"0.7085734",
"0.70508057",
"0.69266826",
"0.68817693",
"0.68608207",
"0.66608745",
"0.66440624",
"0.6596019",
"0.6556084",
"0.6540522",
"0.6464882",
"0.6458014",
"0.6457657",
"0.6408675",
"0.6387551",
"0.63855827",
"0.6372275",
"0.6287982",
"0.62231493",
"0.62183666",
"0.61687744",
"0.6123037",
"0.6070905"
] | 0.83878875 | 0 |
Return True if the UAV has reached the current waypoint and False if not. | def has_uav_reached_current_waypoint(self):
return self.drone.has_reached_waypoint() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_reached_waypoint_goal(self):\n return self.control_instance.check_reached_waypoint_goal()",
"def update(self):\n\n # If the agent has already reached the\n # last waypoint it doesn't need to update\n if self.finished:\n return True\n\n # Skip if the proxy don't have any [new] data\n if (self.pp.info.datatime == 0) or \\\n (self.pp.info.datatime == self.last_read):\n return False\n\n self.last_read = self.pp.info.datatime\n\n # If this is the first update then head toward the first waypoint\n if self.first_update:\n self.pp.set_cmd_pose(self.active_waypoint['x'],\n self.active_waypoint['y'],\n self.get_heading({'x': self.pp.px, 'y': self.pp.py}, self.active_waypoint),\n 1)\n self.first_update = False\n return False\n\n # Calculate how far the agent is from its current waypoint\n dist = math.hypot(self.pp.px - self.active_waypoint['x'],\n self.pp.py - self.active_waypoint['y'])\n\n # Has it reached it yet?\n if dist < self.waypoint_distance_tolerance:\n\n # If all waypoints have been reached, stop the agent and return True\n if (self.active_waypoint_index + 1) >= len(self.waypoints):\n self.pp.set_cmd_vel(0.0, 0.0, 0.0, 0)\n self.pp.enable(False) # redundant?\n self.finished = True\n return True\n\n # Otherwise select the next waypoint\n prev_waypoint = self.active_waypoint\n self.active_waypoint_index += 1\n self.active_waypoint = self.waypoints[self.active_waypoint_index]\n\n # ...and drive to it\n self.pp.set_cmd_pose(self.active_waypoint['x'],\n self.active_waypoint['y'],\n self.get_heading(prev_waypoint, self.active_waypoint),\n 1)\n\n # Still have waypoints to visit\n return False",
"def goal_reached(self, robot_pose):\n goal = self.global_plan.poses[-1].pose\n return self.calc_distance(robot_pose, goal) < self.goal_dist_threshold",
"def if_goal_reached(self, pose):\n dx = self.pos.x - pose.x\n dy = self.pos.y - pose.y\n dist = math.sqrt(dx ** 2 + dy ** 2)\n return dist < self.radiu",
"def reached(self) -> bool:\n return (time.time() - self._start) >= self.seconds",
"def at_goal(self):\n return self.distance_from_goal < self.robot.wheels.base_length/2",
"def at_goal(self):\n return self.distance_from_goal < self.robot.wheels.base_length/2",
"def passed_waypoint(self, waypoint_num):\n bools = self.ros_node.get_data('/diff_drive/waypoints_achieved', simple_data = False)\n # Waits for the data\n if bools is not None:\n if len(bools.bools) >= waypoint_num:\n return bools.bools[waypoint_num -1]\n \n rospy.logerr_throttle(15, \"Checking Waypoint Failed. Did not find a waypoint with the number '%s' in the path\" %(waypoint_num))\n return False\n else:\n return False",
"def goingToBreak(self):\n \n if (\n (self.current_loc == 0 and not self.direction_forward) or\n (self.current_loc == len(self.destinations)-1 and self.direction_forward)\n ):\n return True\n return False",
"def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True",
"def iswalking(self):\r\n return self.model.coord!=self.model.targetcoord",
"def goal_reached(self, position):\n return position >= self.goal",
"def reached_goal(self):\n for i in range(self.simulator_.num_agents):\n if rvo_math.abs_sq(self.simulator_.agents_[i].position_ - self.goals_[i]) > self.simulator_.agents_[i].radius_ * self.simulator_.agents_[i].radius_:\n return False\n\n return True",
"def is_at_goal(self):\n return self._current_loc.get_row() == BoardPath._goal_loc.get_row() and \\\n self._current_loc.get_column() == BoardPath._goal_loc.get_column()",
"def reached_angle(self, angle):\n if self.ros_node.get_data(\"/auto/hood/current/angle\") == angle:\n return True\n return False",
"def is_jumping(self):\n if(self.going_down or self.going_up or self.mid_air):\n return True\n else:\n return False",
"def isFinished(self) -> bool:\n\n # Need to convert distance travelled to degrees. The Standard\n # Romi Chassis found here, https://www.pololu.com/category/203/romi-chassis-kits,\n # has a wheel placement diameter (149 mm) - width of the wheel (8 mm) = 141 mm\n # or 5.551 inches. We then take into consideration the width of the tires.\n inchPerDegree = math.pi * 5.551 / 360.0\n\n # Compare distance travelled from start to distance based on degree turn\n return self._getAverageTurningDistance() >= inchPerDegree * self.degrees",
"def check_waypoint_reached(self, pos_tol=0.3, head_tol=0.01):\n self.local_pos_pub.publish(self.waypoint_g)\n\n dx = abs(\n self.waypoint_g.pose.position.x - self.current_pose_g.pose.pose.position.x\n )\n dy = abs(\n self.waypoint_g.pose.position.y - self.current_pose_g.pose.pose.position.y\n )\n dz = abs(\n self.waypoint_g.pose.position.z - self.current_pose_g.pose.pose.position.z\n )\n\n dMag = sqrt(pow(dx, 2) + pow(dy, 2) + pow(dz, 2))\n\n cosErr = cos(radians(self.current_heading_g)) - cos(\n radians(self.local_desired_heading_g)\n )\n\n sinErr = sin(radians(self.current_heading_g)) - sin(\n radians(self.local_desired_heading_g)\n )\n\n dHead = sqrt(pow(cosErr, 2) + pow(sinErr, 2))\n\n if dMag < pos_tol and dHead < head_tol:\n return 1\n else:\n return 0",
"def isFinished(self):\n current = self.robot.drivetrain.get_gyro_angle()\n # If abs(target - current) < threshold then return true\n return math.fabs(self._target_degrees - current) <= self._degree_threshold or self.isTimedOut()",
"def reached_dest(self) -> bool:\n return self.base_route[-1] == self.traveled_nodes[-1][self.NODE_INDEX]",
"def is_on(self) -> bool:\n return self.event.is_tripped",
"def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1",
"def ismoving(self):\n return not self.get_par(\"done_moving\")",
"def _has_arrived(self, context) -> bool:\n return self._target[0] == context.x and self._target[1] == context.y",
"def is_driving(self, first: Waypoint, second: Waypoint) -> bool:\n dist = self.calc_distance(first, second)\n time_delta = (second.timestamp - first.timestamp).seconds\n if dist > GPS_DISTANCE_ACCURATE_METERS and time_delta < STOP_TIME_SECONDS:\n return True\n elif GPS_DISTANCE_ACCURATE_METERS < dist < CONNECTION_LOST_DISTANCE_THRESHOLD_METERS and \\\n time_delta < CONNECTION_LOST_TIMEOUT_SECONDS:\n return True\n else:\n return False",
"def _ismoving(self):\n return self.dp.state()==PyTango.DevState.MOVING",
"def is_done(self):\n # Retrieve robot position\n pos = self.robot.getPosition()\n # Check if robot has moved sideways too much\n if abs(pos[0]) > 2.0:\n return True\n # Check if robot has fallen (body too close to the ground)\n elif pos[1] < 0.3:\n return True\n # Check it the robot has reached the end of the track\n elif pos[2] < -20.0:\n return True\n # Check if the robot has walked backwards\n elif pos[2] > 25.0:\n return True\n # No conditions reached, not done yet\n else:\n return False",
"def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r",
"def isCurrentPlayerHome(self):\r\n \r\n #creates corresponding starting and ending points for each player\r\n if self.getTurn() == RED:\r\n start = 0\r\n end = 18\r\n else:\r\n start = 6\r\n end = 24\r\n \r\n #checks whether the current player has checkers on corresponding points\r\n for i in range(start, end):\r\n if self.points[i].getTeam() == self.getTurn():\r\n return False\r\n \r\n return True",
"def is_complete(self, vehicle_state, distance_travelled: float) -> bool:\n return self.goal.is_reached(vehicle_state)"
] | [
"0.7815707",
"0.7180824",
"0.70038074",
"0.69606185",
"0.694313",
"0.6907562",
"0.6907562",
"0.6904402",
"0.68095386",
"0.6806599",
"0.6785689",
"0.67554694",
"0.6742048",
"0.67407054",
"0.66793656",
"0.66658723",
"0.65675145",
"0.6564974",
"0.6496703",
"0.648724",
"0.64718044",
"0.64579546",
"0.64021486",
"0.63968456",
"0.63868624",
"0.6379848",
"0.63688874",
"0.6362253",
"0.6357971",
"0.6355502"
] | 0.864633 | 0 |
Hook to be invoked before the test method is executed. May perform expensive setup here. | def before_test(self, func, *args, **kwargs):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def before_run_tests(cls):\n pass",
"def do_before(self):\r\n pass",
"def beforeTest(self, test):\n self.setupLoghandler()",
"def before(self) -> None:\n pass",
"def startTestHook(self):",
"def setUp(self):\n # use self.attribute to keep anything which needs to be accessed later\n print('setUp method\\n')",
"def setUp(self):\n super(TestCase, self).setUp()\n self._context = CallContext()",
"def setUp(self):\n print('Calling \\'setUp\\'')",
"def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass",
"def setUp(self) -> None:\n pass",
"def setUp(self) -> None:\n pass",
"def setUp(self):\n\n return",
"def setUp(self):\n pass #because we dont have anything to setup.",
"def setUp(self):\n print(\"New test by Nikolay Melnik\")",
"def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)",
"def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()",
"def on_before_execution(self):\n pass",
"def setUp(self):\n\n BaseTest.setUp(self)",
"def setUp(self):\r\n pass # nothing used by all\r",
"def setUp(self) :\n pass",
"def setUp(self):\n raise NotImplementedError",
"def setUp(self):\n GlusterBaseClass.setUp.im_func(self)\n self.test_method_complete = False",
"def setUp_extra(self):\n pass",
"def setUp(self):\n\n pass",
"def setUp(self):\n\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass",
"def setUp(self):\n pass"
] | [
"0.80520755",
"0.7866928",
"0.76243114",
"0.76155716",
"0.749539",
"0.74885553",
"0.7474587",
"0.74741113",
"0.73961705",
"0.7393307",
"0.7393307",
"0.73867905",
"0.7364386",
"0.7364033",
"0.73517734",
"0.7307922",
"0.7301422",
"0.7300426",
"0.7297788",
"0.7283322",
"0.72643584",
"0.7251347",
"0.7248478",
"0.72049904",
"0.72049904",
"0.7200059",
"0.7200059",
"0.7200059",
"0.7200059",
"0.7200059"
] | 0.8013997 | 1 |
Generates fixture objects from the given response and stores them in the application-specific cache. | def execute(self, response):
        if not has_request_context():
return
self._fallback_fixture_names()
try:
app = self.auto_fixture.app
# Create response fixture
fixture = Fixture.from_response(response, app, self.response_name)
self.auto_fixture.add_fixture(fixture)
# Create request fixture
if request.data:
fixture = Fixture.from_request(request, app, self.request_name)
self.auto_fixture.add_fixture(fixture)
except TypeError: # pragma: no cover
warnings.warn("Could not create fixture for unsupported mime type")
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def api_response():\n return load_fixture(\"smhi.json\", DOMAIN)",
"def fixture_retrieved():\n from aiida.plugins import DataFactory\n from aiida_logger.tests import TEST_DIR\n\n retrieved = DataFactory('folder')()\n retrieved.put_object_from_tree(path=os.path.join(TEST_DIR, 'input_files'))\n\n return retrieved",
"def orlov_fixture(request, workspace, minicap):\n logger.info('Orlov Fixture : setup minicap service and other.')\n request.cls.workspace = workspace\n request.cls.minicap = minicap\n request.cls.evidence_dir = request.cls.workspace.mkdir('tmp\\\\evidence')\n request.cls.video_dir = request.cls.workspace.mkdir('tmp\\\\video')\n yield\n logger.info('Olorv Fixture : teardown minicap service and other.')",
"def api_response_lack_data():\n return load_fixture(\"smhi_short.json\", DOMAIN)",
"def setUp(self):\n cache.clear()\n self.factory = APIRequestFactory()",
"def program_response_fixture() -> dict[str, Any]:\n return cast(dict[str, Any], json.loads(load_fixture(\"program_response.json\")))",
"def fixtures():",
"def store_response_in_cache(responsefile, response):\n global __response_cache\n log.debug(\"Storing data from flats (%s) in cache\" % responsefile)\n __response_cache[responsefile] = {}\n modtime = str(os.path.getmtime(responsefile))\n __response_cache[responsefile][modtime] = response",
"def _Dynamic_Fetch(self, request, response):\n print \"Request:\"\n print (\"Request: {}\").format(request)\n response.set_content(self.mock_response_issue)\n response.set_statuscode(200)\n new_header = response.add_header()\n new_header.set_key('Content-type')\n new_header.set_value('application/json')\n\n response.set_finalurl(request.url)\n response.set_contentwastruncated(False)\n\n # allow to query the object after it is used\n # pylint: disable=attribute-defined-outside-init\n self.request = request\n self.response = response",
"def fixture_test_store(andy, pandy, candy):\n store_ = InMemoryStore[Person](unique_keys={\"name\"})\n store_.add(andy)\n store_.add(pandy)\n store_.add(candy)\n yield store_",
"def mock_api():\n with open(os.path.join(HERE, 'response.json'), 'r') as fp:\n webargs_response = fp.read()\n # A valid package with a proper response\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/webargs/json',\n body=webargs_response,\n content_type='application/json'\n )\n # A valid package with no releases\n with open(os.path.join(HERE, 'response_noreleases.json'), 'r') as fp:\n foo_response = fp.read()\n\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/foo/json',\n body=foo_response,\n content_type='application/json'\n )\n\n # An invalid package name\n responses.add(\n responses.GET,\n 'https://pypi.python.org/pypi/nope/json',\n status=404\n )\n responses.start()\n\n yield responses\n\n responses.stop()",
"def fill_from_api_response(self, api_response):\n pass",
"def test_dataset_response(dataset_response, client):\n file = glob.glob('./**/test_areas.csv', recursive=True)\n with open(file[0], 'r') as fp:\n resp = client.create_dataset(fp)\n assert resp.keys() == dataset_response.keys()",
"def test_api_response_data(self):",
"def fixture_andy():\n yield Person(name=\"Andy\", age=12, hobbies=[\"Star Wars\", \"Bicycles\"])",
"def fixture_candy():\n yield Person(name=\"Candy\", age=13, hobbies=[\"Gardening\"])",
"def fixture_tile_details():\n return {\n \"version\": 1,\n \"revision\": 1,\n \"timestamp\": \"2018-06-19T23:04:39.097Z\",\n \"timestamp_ms\": 1529449479097,\n \"result_code\": 0,\n \"result\": {\n TILE_TILE_UUID: {\n \"thumbnailImage\": \"https://local-tile-pub.s3.amazonaws.com/..\",\n \"tileState\": {\n \"ringStateCode\": 0,\n \"connectionStateCode\": 0,\n \"uuid\": TILE_TILE_UUID,\n \"tile_uuid\": TILE_TILE_UUID,\n \"client_uuid\": TILE_CLIENT_UUID,\n \"timestamp\": 1512615215149,\n \"advertised_rssi\": 1.4e-45,\n \"client_rssi\": 1.4e-45,\n \"battery_level\": 1.4e-45,\n \"latitude\": 21.9083423,\n \"longitude\": -72.4982138,\n \"altitude\": 1821.129812,\n \"h_accuracy\": 5.0,\n \"v_accuracy\": 3.0,\n \"speed\": 1.4e-45,\n \"course\": 1.4e-45,\n \"authentication\": None,\n \"owned\": True,\n \"has_authentication\": None,\n \"lost_timestamp\": -1,\n \"connection_client_uuid\": TILE_CLIENT_UUID,\n \"connection_event_timestamp\": 1512615234268,\n \"last_owner_update\": 1512615215149,\n \"connection_state\": \"READY\",\n \"ring_state\": \"STOPPED\",\n \"is_lost\": False,\n \"voip_state\": \"OFFLINE\",\n },\n \"entityName\": \"TILE\",\n \"tile_uuid\": \"19264d2dffdbca32\",\n \"firmware_version\": \"01.12.14.0\",\n \"owner_user_uuid\": \"2ea56f4d-6576-4b4e-af11-3410cc65e373\",\n \"name\": TILE_TILE_NAME,\n \"category\": None,\n \"image_url\": \"https://local-tile-pub.s3.amazonaws.com/...\",\n \"visible\": True,\n \"is_dead\": False,\n \"hw_version\": \"02.09\",\n \"product\": \"DUTCH1\",\n \"archetype\": \"WALLET\",\n \"configuration\": {\"fw10_advertising_interval\": None},\n \"last_tile_state\": {\n \"ringStateCode\": 0,\n \"connectionStateCode\": 0,\n \"uuid\": \"19264d2dffdbca32\",\n \"tile_uuid\": \"19264d2dffdbca32\",\n \"client_uuid\": \"a01bf97a-c89a-40e2-9534-29976010fb03\",\n \"timestamp\": 1512615215149,\n \"advertised_rssi\": 1.4e-45,\n \"client_rssi\": 1.4e-45,\n \"battery_level\": 1.4e-45,\n \"latitude\": 39.797571,\n \"longitude\": -104.887826,\n \"altitude\": 1588.002773,\n \"h_accuracy\": 5.0,\n \"v_accuracy\": 3.0,\n \"speed\": 1.4e-45,\n \"course\": 1.4e-45,\n \"authentication\": None,\n \"owned\": True,\n \"has_authentication\": None,\n \"lost_timestamp\": -1,\n \"connection_client_uuid\": TILE_CLIENT_UUID,\n \"connection_event_timestamp\": 1512615234268,\n \"last_owner_update\": 1512615215149,\n \"connection_state\": \"DISCONNECTED\",\n \"ring_state\": \"STOPPED\",\n \"is_lost\": False,\n \"voip_state\": \"OFFLINE\",\n },\n \"firmware\": {\n \"expected_firmware_version\": \"\",\n \"expected_firmware_imagename\": \"\",\n \"expected_firmware_urlprefix\": \"\",\n \"expected_firmware_publish_date\": 0,\n \"expected_ppm\": None,\n \"expected_advertising_interval\": None,\n \"security_level\": 1,\n \"expiry_timestamp\": 1529471079097,\n \"expected_tdt_cmd_config\": None,\n },\n \"auth_key\": \"aliuUAS7da980asdHJASDQ==\",\n \"renewal_status\": \"LEVEL1\",\n \"metadata\": {},\n \"auto_retile\": False,\n \"status\": \"ACTIVATED\",\n \"tile_type\": \"TILE\",\n \"registration_timestamp\": 1482711833983,\n \"is_lost\": False,\n \"auth_timestamp\": 1512287015405,\n \"activation_timestamp\": 1482711835011,\n \"last_modified_timestamp\": 1514353410254,\n }\n },\n }",
"def test_request(self):\n test_sets = [#{'url':\"http://.*:8774/.*/flavors/detail\", 'data':{'candy':'yum!'}},\n {'url':\"http://.*:8774/.*/flavors/detail\", 'data':{'status_code': 90210, '_content': json.dumps({'error':{'message':'Old Gregg did not like you being in his waters!', 'detail':'Mmmm...creamy.'}})}, 'exception':exceptions.ClientException},\n ]\n for test_set in test_sets:\n response = ''\n cm = None\n self.write_inject_file(test_set)\n exp_exception = test_set.get('exception')\n if exp_exception:\n with self.assertRaises(exp_exception) as cm:\n print 'Expected exception: %s' %exp_exception\n response = self.client.flavors.list()\n else:\n response = self.client.flavors.list()\n if cm:\n print 'Exception info: %s' % vars(cm.exception)\n self.assertEqual(test_set['data']['status_code'], cm.exception.code)\n print 'Test response: %s' %response\n self.remove_inject_file()\n print '#'*80",
"def caches_mock(request):\n\n from unittest import mock\n from contextlib import ExitStack\n from dogpile.cache import make_region\n\n caches_to_mock = []\n expiration_time = 600\n\n params = __get_fixture_param(request)\n if params:\n caches_to_mock = params.get(\"caches_to_mock\", caches_to_mock)\n expiration_time = params.get(\"expiration_time\", expiration_time)\n\n with ExitStack() as stack:\n mocked_caches = []\n for module in caches_to_mock:\n region = make_region().configure('dogpile.cache.memory', expiration_time=expiration_time)\n stack.enter_context(mock.patch(module, new=region))\n mocked_caches.append(region)\n\n yield mocked_caches",
"def program_post_response_fixture() -> dict[str, Any]:\n return cast(dict[str, Any], json.loads(load_fixture(\"program_post_response.json\")))",
"def test_cache(self):\n response = self.make_call().json[0]\n self.assertFalse(response['cached']) # a call has ben made to Google API\n # each step is saved\n self.assertEqual(len(r.keys(pattern=r'step*')), int(r.get('counter')))\n self.assertEqual(int(r.get('counter')), len(response['steps']))\n pairs = set((i, j) for (i, o), (j, d) in combinations_with_replacement(list(enumerate(response['steps'])), 2) if i <= j)\n self.assertEqual(len(r.keys(pattern=r'origin*')), len(pairs)) # each combination is cached\n for i, j in pairs:\n origin, destination = response['steps'][i], response['steps'][j]\n resp = self.make_call(origin=f\"{origin['start_lat']},{origin['start_lng']}\",\n destination=f\"{destination['end_lat']},{destination['end_lng']}\").json[0]\n # No new API calls are made, cached results are returned for each possible combination of origin/dest\n self.assertEqual(origin['start_lat'], resp['start_lat']) # all coordinates should match\n self.assertEqual(origin['start_lng'], resp['start_lng'])\n self.assertEqual(destination['end_lat'], resp['end_lat'])\n self.assertEqual(destination['end_lng'], resp['end_lng'])\n self.assertTrue(resp['cached'])\n # New API call is made for transit directions. We can't recycle driving directions for this one.\n response = self.make_call(mode='transit').json\n self.assertFalse(response[0]['cached'])\n self.assertTrue(len(response) > 1) # when asking for transit directions it should yield multiple alternatives\n # driving directions should be cached already\n response = self.make_call().json[0]\n self.assertTrue(response['cached'])\n # Walking directions should not be cached\n walking = self.make_call(mode='walking').json[0]\n self.assertFalse(walking['cached'])\n # Bicycling should be treated as walking but 3 times as fast\n bicycling = self.make_call(mode='bicycling').json[0]\n self.assertTrue(bicycling['cached'])\n self.assertEqual(walking['duration'], 3 * bicycling['duration'])",
"def setUp(self):\n posts = []\n serializers = []\n self.responses = []\n for response in RESPONSES:\n with self.subTest():\n title = response['title']\n url = response['url']\n created = response['created']\n post = Post.objects.create(title=title,\n url=url,\n created=created)\n posts.append(post)\n for post in posts:\n with self.subTest(current_post=post):\n ser = PostSerializer(post)\n serializers.append(ser)\n for ser in serializers:\n with self.subTest(current_serializer=ser):\n response = Response(ser.data)\n self.responses.append(response)",
"def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c",
"def test_guidanceresponse_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"guidanceresponse-example.json\"\n inst = guidanceresponse.GuidanceResponse.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"GuidanceResponse\" == inst.resource_type\n\n impl_guidanceresponse_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"GuidanceResponse\" == data[\"resourceType\"]\n\n inst2 = guidanceresponse.GuidanceResponse(**data)\n impl_guidanceresponse_1(inst2)",
"def process_response(self, response: Dict) -> Iterator[dict]:",
"def fixture_chunked_json_data(tmp_path_factory, request):\n # Make root dir\n root = tmp_path_factory.mktemp(\"data\")\n\n # Set params\n num_chunks = request.param.num_chunks\n chunk_size = request.param.chunk_size\n\n # Seed JSON data\n paths = [root / Path(f\"{idx}.json\") for idx in range(num_chunks)]\n for chunk_idx, path in enumerate(paths):\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n\n content = {str(chunk_idx + idx): chunk_idx + idx for idx in range(chunk_size)}\n with path.open(\"w\") as file:\n json.dump(content, file)\n\n return root",
"def create_test_data(self):\n fake = Faker(['en_US', 'ja_JP', 'el_GR', 'de_DE'])\n\n self.actor_request = {\n 'name': fake.name(),\n 'age': random.randint(22, 88),\n 'gender': random.choice(['M', 'F'])\n }\n\n self.movie_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n 'releaseDate': str(fake.date_between())\n }\n\n self.actor_update_request = {\n 'name': fake.name(),\n }\n\n self.movie_update_request = {\n 'title': fake.color_name() + ' ' + fake.street_suffix(),\n }\n\n for _ in range(30):\n actor_name = fake.name()\n actor_age = random.randint(22, 88)\n actor_gender = random.choice(['M', 'F'])\n\n movie_title = fake.color_name() + ' ' + fake.street_suffix()\n movie_release_date = str(fake.date_between())\n\n actor = Actor(actor_name, actor_age, actor_gender)\n actor.insert()\n\n movie = Movie(movie_title, movie_release_date)\n movie.insert()\n\n for _ in range(20):\n actors = Actor.query.all()\n movies = Movie.query.all()\n\n actor_to_update = random.choice(actors)\n movie_to_update = random.choice(movies)\n actor_to_update.movies.append(movie_to_update)",
"def simulate_response(self, documents):",
"def set(self, response):\n self.data[response.url] = Page(\n response.json(),\n self._get_expiration(response.headers)\n )",
"async def test_api_template_cached(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n hass.states.async_set(\"sensor.temperature\", 30)\n\n resp = await mock_api_client.post(\n const.URL_API_TEMPLATE,\n json={\"template\": \"{{ states.sensor.temperature.state }}\"},\n )\n\n body = await resp.text()\n\n assert body == \"30\"\n\n hass.states.async_set(\"sensor.temperature\", 40)\n resp = await mock_api_client.post(\n const.URL_API_TEMPLATE,\n json={\"template\": \"{{ states.sensor.temperature.state }}\"},\n )\n\n body = await resp.text()\n\n assert body == \"40\""
] | [
"0.6123618",
"0.5608524",
"0.56062853",
"0.5596069",
"0.55481315",
"0.5504589",
"0.5498172",
"0.5480052",
"0.5423372",
"0.5308721",
"0.52908236",
"0.5271301",
"0.52630156",
"0.525457",
"0.5245118",
"0.52290255",
"0.52251756",
"0.5215446",
"0.5191281",
"0.51741076",
"0.5172798",
"0.5164142",
"0.5162481",
"0.5158709",
"0.515818",
"0.51415455",
"0.5109162",
"0.5101342",
"0.5092544",
"0.50873935"
] | 0.6490722 | 0 |
Falls back to the default fixture names if no names could be determined up to this point. | def _fallback_fixture_names(self):
if not self.request_name or not self.response_name:
warnings.warn(
"No name was specified for the recorded fixture. Falling "
"back to default names.")
if not self.request_name:
self.request_name = __default_names__[0]
if not self.response_name:
self.response_name = __default_names__[1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def populate_fixtures():\n languages()\n words()",
"def fixtures():",
"def _find_fixtures(self, start_dir):\r\n fixtures = []\r\n def _find(arg, dirname, names):\r\n if (dirname.endswith('fixtures')) and (dirname.find('unit_test')==-1):\r\n for name in names:\r\n if (name.endswith(FIXTUERS_EXT)) and (name.find('initial_data')==-1):\r\n fixtures.append(name.replace(FIXTUERS_EXT, ''))\r\n os.path.walk(start_dir, _find, None)\r\n \r\n return fixtures",
"def pytest_runtest_setup(item):\n if hasattr(item, 'fixturenames') and LOOP_KEY not in item.fixturenames:\n item.fixturenames.append(LOOP_KEY)",
"def _fixture_setup(self):\n pass",
"def load_fixtures(self):\n for fixture_dir in settings.FIXTURE_DIRS:\n fixture_dir = os.path.join(fixture_dir, self.filesystem_name)\n for (root, dirs, files) in os.walk(fixture_dir):\n for file in files:\n full_file_path = os.path.join(root, *dirs, file)\n with open(full_file_path, 'rb') as f:\n self.save(os.path.relpath(full_file_path, fixture_dir), f)",
"def setUp(self):\n self.fixture_file = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixture_list = [\"my\", \"written\", \"text\"]\n self.fixture_list_empty_strings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixture_list_trailing_empty_strings = [\"my\", \"written\", \"text\", \"\", \"\"]",
"def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()",
"def start_fixture(self):\n pass",
"def setUp(self):\n self.fixtureFile = r\"v:\\workspace\\FileHandling\\src\\test-read-write.txt\"\n self.fixtureList = [\"my\", \"written\", \"text\"]\n self.fixtureListEmptyStrings = [\"my\", \"\", \"\", \"written\", \"text\"]\n self.fixtureListTrailingEmptyString = [\"my\", \"written\", \"text\", \"\", \"\"]",
"def setUpFixture(self):\n pass",
"def fixture_name(self):\n return \"coding_dna_substitution\"",
"def generate_tests(self, fixture):\n if fixture.startswith(\"splunk_searchtime_fields\"):\n yield from self.dedup_tests(\n self.fieldtest_generator.generate_tests(fixture),\n fixture\n )\n elif fixture.startswith(\"splunk_searchtime_cim\"):\n yield from self.dedup_tests(\n self.cim_test_generator.generate_tests(fixture),\n fixture\n )",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def fixture_other_case() -> str:\n return \"angrybird\"",
"def load_initial_fixtures_func(app_name):\n return partial(_load_initial_fixtures_impl, app_name)",
"def fixture_name(self):\n return \"coding_dna_insertion\"",
"def tearDownFixture(self):\n pass",
"def expected_city_names_fixture():\n return {'b', 'a', 'c'}",
"def fixture_name(self):\n return \"coding_dna_deletion\"",
"def load_test_subjects_names(self):\n files = os.listdir(os.path.join(self.db_path, self.test_batch))\n for f in files:\n if f.startswith('test-volume'):\n s_name = str.split(str.split(f, '.')[0], '-')[-1]\n self.testing_subjects.append(s_name)\n self.n_test = len(self.testing_subjects)",
"def test_template_name():\n for t in templates:\n assert len(t.name) > 0",
"def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")",
"def set_default_fitscenarios(self, default_dict):\n try:\n self.setup.set_defaults(default_dict)\n return 1\n except:\n return 0",
"def setFixtureParamNames(request, orderedParamNameList):\n numParams = len(request.param)\n request.keywords.setdefault(\n \"fixture_param_names\",\n dict())[request.fixturename] = orderedParamNameList[:numParams]",
"def fixture_name(self):\n return \"genomic_silent_mutation\"",
"def test_defaults_are_kept_if_not_specified_in_args(\n self, junit4_hooks, full_args\n ):\n args = empty_args(master_repo_names=MASTER_REPO_NAMES)\n expected_ignore_tests = [\"some\", \"tests\"]\n expected_hamcrest_path = HAMCREST_PATH\n expected_junit_path = JUNIT_PATH\n expected_rtd = RTD\n expected_disable_security = False\n\n junit4_hooks._ignore_tests = expected_ignore_tests\n junit4_hooks._hamcrest_path = expected_hamcrest_path\n junit4_hooks._junit_path = expected_junit_path\n junit4_hooks._reference_tests_dir = expected_rtd\n junit4_hooks._disable_security = expected_disable_security\n\n junit4_hooks.parse_args(args)\n\n assert junit4_hooks._ignore_tests == expected_ignore_tests\n assert junit4_hooks._hamcrest_path == expected_hamcrest_path\n assert junit4_hooks._junit_path == expected_junit_path\n assert junit4_hooks._reference_tests_dir == expected_rtd\n assert junit4_hooks._disable_security == expected_disable_security",
"def __init_fixture_methods(self):\n # init our self.(class_setup|setup|teardown|class_teardown)_fixtures lists\n for fixture_type in fixture_types:\n setattr(self, \"%s_fixtures\" % fixture_type, [])\n\n # for setup methods, we want oldest class first. for teardowns, we want newest class first\n hierarchy = list(reversed(type(self).mro()))\n for cls in hierarchy[1:]:\n # mixins on TestCase instances that derive from, say, object, won't be set up properly\n if hasattr(cls, '_fixture_methods'):\n # the metaclass stored the class's fixtures in a _fixture_methods instance variable\n for fixture_type, fixture_methods in cls._fixture_methods.iteritems():\n bound_fixture_methods = [instancemethod(func, self, self.__class__) for func in fixture_methods]\n if fixture_type.endswith('setup'):\n # for setup methods, we want methods defined further back in the\n # class hierarchy to execute first\n getattr(self, \"%s_fixtures\" % fixture_type).extend(bound_fixture_methods)\n else:\n # for teardown methods though, we want the opposite\n setattr(self, \"%s_fixtures\" % fixture_type, bound_fixture_methods + getattr(self, \"%s_fixtures\" % fixture_type))",
"def _get_fixture(item, arg_name, fixture=None):\n if arg_name == \"request\":\n # Support parameterized fixture\n if fixture:\n try:\n item._request.param = item._pyfuncitem.callspec.params[fixture.argname]\n except (AttributeError, KeyError) :\n pass\n\n return item._request\n\n if arg_name == \"self\":\n raise Ignore\n\n _fixtureinfo = item._fixtureinfo\n fixtures = sorted(\n _fixtureinfo.name2fixturedefs[arg_name], key=lambda x: not x.has_location\n )\n return fixtures[0]",
"def setup_suite(dataset):\n for speaker in create_all_speakers(dataset):\n if speaker.is_pickle_saved():\n print('{} already exists.'.format(speaker.output_name))\n else:\n speaker.safe_to_pickle()"
] | [
"0.62607694",
"0.6084847",
"0.59035265",
"0.58864886",
"0.5782445",
"0.56454164",
"0.5598075",
"0.5590924",
"0.55757374",
"0.55625635",
"0.5513001",
"0.5497023",
"0.54902035",
"0.5441633",
"0.5436192",
"0.5364549",
"0.536005",
"0.535958",
"0.53501177",
"0.5234277",
"0.521221",
"0.5210105",
"0.51943034",
"0.5190611",
"0.51895875",
"0.51807374",
"0.51774925",
"0.5143355",
"0.51406276",
"0.5123066"
] | 0.7567539 | 0 |
Generate an ICS calendar file from the given days. | def generate_ics(days: Sequence[dict], filename: Text) -> None:
cal = Calendar()
cal.add("X-WR-CALNAME", "中国法定节假日")
cal.add("X-WR-CALDESC", "中国法定节假日数据,自动每日抓取国务院公告。")
cal.add("VERSION", "2.0")
cal.add("METHOD", "PUBLISH")
cal.add("CLASS", "PUBLIC")
cal.add_component(_create_timezone())
days = sorted(days, key=lambda x: x["date"])
for fr, to in _iter_date_ranges(days):
start = _cast_date(fr["date"])
end = _cast_date(to["date"]) + datetime.timedelta(days=1)
name = fr["name"] + "假期"
if not fr["isOffDay"]:
name = "上班(补" + name + ")"
cal.add_component(_create_event(name, start, end))
with open(filename, "wb") as f:
f.write(cal.to_ical()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_ics(events, config):\n\n # Create the Calendar\n calendar = icalendar.Calendar()\n calendar.add('prodid', config.calendar_prodid)\n calendar.add('version', '2.0')\n calendar.add('method', 'publish')\n\n for event_data in events:\n # Create the event\n event = icalendar.Event()\n\n # Populate the event\n event.add('summary', event_data['title'])\n event.add('description', get_description(event_data))\n event.add('uid', event_data['id'])\n event.add('location', event_data['place'])\n event.add('dtstart', get_datetime(event_data, 'when_start'))\n if event_data['when_end']:\n event.add('dtend', get_datetime(event_data, 'when_end'))\n event.add('dtstamp', datetime.datetime.now())\n\n # Add the event to the calendar\n calendar.add_component(event)\n\n return calendar.to_ical()",
"def generate_dates(self):\r\n\r\n numdays = 20\r\n\r\n base = datetime.datetime.today()\r\n\r\n date_list = [base + datetime.timedelta(days=x) for x in range(numdays)]\r\n\r\n date_str = [x.strftime(\"%d-%m-%Y\") for x in date_list]\r\n\r\n return date_str",
"def generate_days(ndays, year=2022, month=1, day=1):\n # NOTE: This method is more efficient than the \"string parsing\"\n # method used by generate_months() and generate_years(),\n # but this only matters if generating a lot of entries\n # and it only works if the datetime64-represented\n # distance between units to generate is constant\n day_indexes = np.arange(ndays, dtype=np.int64) # 0, 1, ..., [ndays-1]\n startdate = np.datetime64(f'{year:02d}-{month:02d}-{day:02d}T00:00:00.000000', 'us')\n usec_per_day = int(1e6) * 86400 # 86.4k sec per day = 60*60*24s\n usec_offsets = day_indexes * usec_per_day\n return usec_offsets + startdate",
"def as_ical(self):\n if self.date_is_approximate:\n return None\n\n ymd = (self.date.year, self.date.month, self.date.day)\n event_date = date(*ymd)\n event = icalendar.Event()\n event.add(\"dtstart\", event_date)\n event.add(\"dtend\", event_date + timedelta(days=1))\n event.add(\"uid\", self.ical_uid)\n event.add(\"summary\", \"Django Girls %s\" % self.city)\n event.add(\"location\", f\"{self.country}, {self.city}\")\n return event",
"def gen_dates(train_per_start, hours_inc=48, n_inc=10, hours1_inc=6, n1_inc=4):\n dates = []\n train_per = train_per_start[:]\n for i_inc in range(n_inc):\n # '2014-06-24 01:00:00','2014-06-30 00:00:00'\n train_per1 = train_per[:]\n for i1_inc in range(n1_inc):\n dates.append(train_per1[:])\n train_per1[0] = add_hour(train_per1[0], hours1_inc)\n train_per1[1] = add_hour(train_per1[1], hours1_inc)\n train_per[0] = add_hour(train_per[0], hours_inc)\n train_per[1] = add_hour(train_per[1], hours_inc)\n return dates",
"def generate_days(self, nr_of_days):\n log = []\n names = self.load_names()\n\n for i in range(0, nr_of_days):\n log.extend(self.generate_day_cycle(names))\n\n return log",
"def generate_days_list():\n\n seven_days = []\n\n for i in xrange(1, 8):\n seven_days.append([i, 0])\n\n return seven_days",
"def generate_day_cycle(self, names):\n day_log = []\n time_delta = timedelta(days=1)\n\n for i in range(0, len(self.HOUR_SHEET)):\n if self.is_time_for_bruteforce(i):\n day_log.extend(self.generate_brute_force_log(i, names))\n\n day_log.extend(self.generate_hour_cycle(i, names))\n\n day_log.sort()\n\n self.date += time_delta\n\n return day_log",
"def get_ic(self):\n return self.dt, self.dr, self.dtheta, self.dphi",
"def draw_day(day):\n\n day_drawing = \"\"\n for i in day:\n for j in i:\n day_drawing += j\n return day_drawing",
"def generate_date_set(self, year, month, days):\n dates = set()\n for day in days:\n dates.add(date(year, month, day))\n return dates",
"def output(self):\n return self.cal.to_ical()",
"def output(self):\n return self.cal.to_ical()",
"def ical_string(self) -> str:\n tz = ''\n if self.timezone != '':\n tz = ';TZID=' + self.timezone\n result = ['BEGIN:VCALENDAR',\n 'BEGIN:VEVENT',\n 'CREATED:' + self._created,\n 'DESCRIPTION:' + self.description,\n 'DTEND' + tz + ':' + self._end,\n 'DTSTAMP' + tz + ':' + self._dtstamp,\n 'DTSTART' + tz + ':' + self._start,\n 'LAST-MODIFIED:' + self.lastmodified,\n 'LOCATION:' + self.location,\n 'SEQUENCE:' + str(self._sequence),\n 'SUMMARY:' + self.summary,\n 'UID:' + self._uid,\n 'END:VEVENT',\n 'END:VCALENDAR']\n return '\\n'.join(result)",
"def visualize_days():\n\t\n\t#grab our parsed data that we parsed earlier\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in the parsed data, and count how many incidents happen on each\n\t#day of the week\n\tcounter = Counter(item[\"DayOfWeek\"] for item in data_file)\n\t\n\t#separate the x-axis data (days of the week) from the counter variable\n\t#from the y-axis (number of incidents each day)\n\tdata_list = [\n\t\t\t\tcounter[\"Monday\"],\n\t\t\t\tcounter[\"Tuesday\"],\n\t\t\t\tcounter[\"Wednesday\"],\n\t\t\t\tcounter[\"Thursday\"],\n\t\t\t\tcounter[\"Friday\"],\n\t\t\t\tcounter[\"Saturday\"],\n\t\t\t\tcounter[\"Sunday\"]\n\t\t\t\t]\n\tday_tuple = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\t\n\t#with y-axis data, assign it to a matplotlib plot instance\n\tplt.plot(data_list)\n\t\n\t#create amount of ticks need for x and y axes and assign labels\n\tplt.xticks(range(len(day_tuple)), day_tuple)\n\t\n\t#save the plot\n\tplt.savefig(\"Days.png\")\n\t\n\t#close plot file\n\tplt.clf()",
"def hydrate_date(days):\n return Date.from_ordinal(unix_epoch_date_ordinal + days)",
"def ical(self) -> Calendar:\n cal = Calendar()\n event = IEvent()\n event.add(\"summary\", \"Video Chat\")\n event.add(\"dtstart\", self.start)\n cal.add_component(event)\n return cal.to_ical()",
"def visualize_days():\n\n # grab our parsed data that we parsed earlier\n data_file = parse(MY_FILE, \",\")\n\n counter = Counter(item['DayOfWeek'] for item in data_file)\n\n data_list = [\n counter['Monday'],\n counter['Tuesday'],\n counter['Wednesday'],\n counter['Thursday'],\n counter['Friday'],\n counter['Saturday'],\n counter['Sunday']\n ]\n\n day_tuple = tuple(['Mon','Tues','Wed','Thurs','Fri','Sat','Sun'])\n\n plt.plot(data_list)\n\n # num of ticks needed for our x-axis & assign labels\n plt.xticks(range(len(day_tuple)),day_tuple)\n \n plt.savefig(\"Days.png\")\n plt.clf()",
"def day2datetime(scenario,days):\r\n\t\tdate_int = np.empty((len(days)));date_int[:]=np.nan\r\n\t\tstart_year =2000\r\n\t\tstart =(start_year*365)\r\n\t\tith=0\t\r\n\t\tfor iday in days:\r\n\t\t\tmonth_days =np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\t\t\tcalendar_days = np.array([0,31,59,90,120,151,181,212,243,273,304,334,365])\r\n\t\t\ttotal_days = int(iday) + start; \r\n\t\t\tyear = total_days//365; \r\n\t\t\tremainder = total_days%365\r\n\t\t\tif remainder ==0: year=year-1;month=12;day=31\r\n\t\t\telse: \r\n\t\t\t\tmonth = 1+[layer for layer in range(len(calendar_days)) if calendar_days[layer]< remainder and calendar_days[layer+1]>=remainder][0]\r\n\t\t\t\tday = int(remainder - calendar_days[month-1])\r\n\t\t\t\tif day == 0: day = month_days[month-1]\r\n\t\t\tdate_int[ith] = year*10000+month*100+day\r\n\t\t\tith=ith+1\r\n\t\treturn date_int.astype(int)",
"def getComparableDateValues(self, days):\n dates = []\n for i in days:\n date = i[:10]\n dates.append(date)\n return dates",
"def get_days(view, restriction):\n today = datetime.today()\n first_day = today - timedelta(days=today.weekday()) # Current week's Monday\n first_day += timedelta(days=NDAYS*view) # Go back/forward view weeks\n\n days, months = [], set()\n for i in range(0, NDAYS):\n i_day = first_day + timedelta(days=i)\n months.add(i_day.strftime(\"%B\"))\n\n elements = {\n 'deliveries':build_data([\n ud for ud in UDnotDone() if restriction(ud, i_day)\n ]) if i_day >= today else [],\n 'day_name': i_day.strftime(\"%A\") + \" \" + str(i_day.day),\n 'color': day_color(today, i_day),\n 'class': \"hideCalendarMobile\" if i_day < today else \"\",\n 'id': str(i_day.day)+\"-\"+str(i_day.month)}\n days.append(elements)\n return days, \"/\".join(list(months))",
"def daily_table(self):\n htable = [0 for i in range(7)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[6]\n htable[evtime] += 1\n return htable",
"def _tr_cal_date(self, date):\n items = []\n for code in self._datefmt:\n if code == 'Y':\n items += [date.year_str]\n elif code == 'M':\n if '/' in self._datefmt or '.' in self._datefmt:\n month = date.month_num\n if month is not None:\n month = \"{:02d}\".format(month)\n else:\n month = self._monthName(date.month)\n if month is not None:\n items += [month]\n elif code == 'D':\n day = date.day\n if day is not None and ',' in self._datefmt:\n items += [str(\"{:02d},\".format(day))]\n elif day is not None:\n items += [\"{:02d}\".format(day)]\n if '/' in self._datefmt:\n sep = '/'\n elif '.' in self._datefmt:\n sep = '.'\n elif '-' in self._datefmt:\n sep = '-'\n else:\n sep = ' '\n return sep.join(items)",
"def visualize_days(parsed_data, output_dir):\n\n # Returning no. of incidents by each day of the week\n counter = fetch_incident_by_days(parsed_data)\n\n # data_list = fetch_incident_by_days.keys()\n\n # Separating the counter to have an ordered list\n y_values = [\n counter[\"Monday\"],\n counter[\"Tuesday\"],\n counter[\"Wednesday\"],\n counter[\"Thursday\"],\n counter[\"Friday\"],\n counter[\"Saturday\"],\n counter[\"Sunday\"]\n ]\n\n # Creating labels for x-axis\n x_labels = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\n # Assigning the data to plot\n plt.plot(y_values)\n\n # Assigning xticks on x-axis\n plt.xticks(range(len(x_labels)), x_labels)\n\n # Save the graph and show the figure\n file_name = os.path.join(output_dir, DAYS_PLOT_FILENAME)\n plt.savefig(file_name)\n plt.show()",
"def ANdatefixer(years):\n\n\n\t# ========== create the new dates ==========\n\t# year = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , 6, 30) for year in years]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates",
"def create_calendar(actions, location_and_time_axes):\n calendar = ical.Calendar()\n calendar['PRODID'] = '{} {}'.format(ical.__name__, ical.__version__)\n calendar['VERSION'] = 2.0\n calendar['X-WR-CALNAME'] = 'PyCon.DE 2018'\n\n for location, date in actions.keys():\n meta_info = location_and_time_axes[(date.year, date.month, date.day)]\n time_axis = meta_info['time_axis']\n for action in actions[(location, date)]:\n if action['title'] == 'End':\n continue\n\n event = create_event(action, date, location, time_axis)\n\n calendar.add_component(event)\n\n return calendar",
"def dayPeriod(lon,lat,n1,n2,day):\n x, y, z = _getXYZ(lon,lat)\n N = range(n1,n2+1)\n D = []\n for n_ in N:\n n = n_ * day\n i = range(0,n)\n j = range(n,n+n)\n d_ = gcDist(x[i],y[i],z[i],\n x[j],y[j],z[j])\n D = D + [d_,]\n print n, d_\n\n return (N,D)",
"def generate_sic_monthly():\n # Set variable\n nc_file = '/home/disk/sipn/rclancy/ecmwf/pf/predictability/SIC/SIC.nc'\n var_name = 'SIC'\n var_date_name = 'SIC_dates'\n\n # Get variables\n var, var_dates =read_nc_var(nc_file, var_name, var_date_name)\n # Get associated dates\n var_year, var_month, var_day = get_y_mo_d(var_dates)\n\n SIC_present=np.zeros([12, 20, 240])\n for m in range(1,13):\n mo_ind = np.array(var_month==m)\n SIC_present[m-1,:,:] = np.nanmean(var[:,:,mo_ind],2)\n\n SIC_present[SIC_present>0]=1\n return SIC_present;",
"def day2datetime(scenario,days):\r\n\t\tdate_int = np.empty((len(days)));date_int[:]=np.nan\r\n\t\tif scenario =='T1970C': start_year =1970\r\n\t\telse: start_year =2010\r\n\t\tstart =(start_year*365)\r\n\t\tith=0\t\r\n\t\tfor iday in days:\r\n\t\t\tmonth_days =np.array([31,28,31,30,31,30,31,31,30,31,30,31])\r\n\t\t\tcalendar_days = np.array([0,31,59,90,120,151,181,212,243,273,304,334,365])\r\n\t\t\ttotal_days = int(iday) + start; \r\n\t\t\tyear = total_days//365; \r\n\t\t\tremainder = total_days%365\r\n\t\t\tif remainder ==0: year=year-1;month=12;day=31\r\n\t\t\telse: \r\n\t\t\t\tmonth = 1+[layer for layer in range(len(calendar_days)) if calendar_days[layer]< remainder and calendar_days[layer+1]>=remainder][0]\r\n\t\t\t\tday = int(remainder - calendar_days[month-1])\r\n\t\t\t\tif day == 0: day = month_days[month-1]\r\n\t\t\tdate_int[ith] = year*10000+month*100+day\r\n\t\t\tith=ith+1\r\n\t\treturn date_int.astype(int)",
"def day_range():\n DAYS = range(1, 32)\n days = map(lambda x: (x, x), DAYS)\n return days"
] | [
"0.6347691",
"0.6087289",
"0.6072295",
"0.59718066",
"0.57362044",
"0.5708821",
"0.56834716",
"0.5679318",
"0.5669046",
"0.56072843",
"0.5576914",
"0.557137",
"0.557137",
"0.553295",
"0.54978454",
"0.5415384",
"0.5413129",
"0.53640497",
"0.5288981",
"0.52658474",
"0.52517194",
"0.52397335",
"0.52350885",
"0.5234849",
"0.5156906",
"0.5156129",
"0.5150557",
"0.5128345",
"0.51163435",
"0.5114991"
] | 0.78284 | 0 |
Get user profile. Fetches from the user collection by using the user's email as key. | def get_user_profile(email): # GET
# NOTE: This method previously called LCS with director credentials in order to retrieve the user's name
# We will update TeamRU to store names along with our user objects, saving the need to call LCS again
user_profile = coll("users").find_one({"_id": email})
if not user_profile:
return {"message": "User not found"}, 404
user_profile["user_id"] = user_profile.pop("_id")
return user_profile, 200 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile",
"def get_user(cls, email=None, user_id=None):\n\n params = {'email': email, 'user_id': user_id}\n user_dict = cls._do_call(\n 'GET', cls.api_endpoint + 'users', params=params)\n return user_dict",
"def get_user_by_email(self, strategy, email):\r\n return strategy.storage.user.user_model().objects.get(email=email)",
"def helper_get_by_email(user_email):\n user = heart_rate_databases_starter.models.User.objects.raw({\"_id\": user_email}).first() # Get the first user where _id=email\n return user",
"def get_info(email):\n # Get the first user where _id=email\n user = models.User.objects.raw({\"_id\": email}).first()\n return user",
"def get_user_profile(self):\n return self.request('get', 'id/users')",
"def load_user(user_email):\n return User.query.get(user_email)",
"def get_user(email, queryset=None):\n if queryset is None:\n queryset = User.objects\n return queryset.get(username=_email_to_username(email))",
"def user(email):\r\n return User.objects.get(email=email)",
"def get_user(self, user_id):\n _email = self._email_for_user_id(user_id)\n response = self._get('/users?{0}'.format(urllib.urlencode({'search': _email})))\n for _user in response:\n if _user['email'] == _email:\n return _user\n return None",
"def get_user_by_email(self, emailid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'emailid': emailid}\n url = SECURE_API_URL + \"raas/v1/user\"\n return self._lr_object._get_json(url, payload)",
"def get_user(self, email):\n return run_transaction(\n self.sessionfactory,\n lambda session: get_user_txn(session, email))",
"def get_user_by_email(cls, user_email):\n\n try:\n user_login_info = User.query.filter_by(email=user_email).one()\n\n return user_login_info\n\n except Exception, error:\n print error",
"def read_user_profile():\n logger.debug(\"entering function read_profile\")\n find_query = {\"user_id\": current_user.id}\n project_query = {\"_id\": 0, \"user_id\": 0, \"password\": 0}\n result = run_find_one_query(config.USERS_COL, find_query, project_query, error=True,\n error_msg=NO_USER_ERR_MSG)\n logger.info(\"fetched user profile for %s\", current_user.id)\n response = get_success_response(data=result)\n logger.debug(\"exiting function read_profile\")\n return response",
"def get_by_email(self, email):\n user = (\n self.session\n .query(tables.User)\n .filter_by(email=email)\n .first()\n )\n return user",
"def retrieve_user(self, email):\n if self.database is None:\n raise Exception(\"No database.\")\n if email is None or len(email) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_user(email)",
"def current_user(email):\n for user in Data.users:\n if email == user['email']:\n return user",
"def show(self, email):\n\n return User.query.filter_by(email=email).first()",
"def getUserbyEmail(self, email):\n\n cursor = self.conn.cursor()\n query = \"SELECT uid, cid, ufirstname, ulastname, udescription, urole, uclassification, email, pin \" \\\n \"FROM Users natural inner join Credential \" \\\n \"WHERE email= %s;\"\n cursor.execute(query, (email,))\n result = cursor.fetchone()\n return result",
"def users_profile_query(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n try:\n user_data = self.auth_server.profile_query(email_query)\n except UnexistentUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % email_query)\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % email_query), 404\n return json.dumps(user_data), 200",
"def get_user_by_email(self, email: str):\n try:\n return model_to_dict(\n User.select().where(User.email == email).get())\n except DoesNotExist:\n raise ValueError(HTTPStatus.NOT_FOUND,\n 'User with email {} does not exist'.format(email))\n except Exception:\n raise BaseException(HTTPStatus.INTERNAL_SERVER_ERROR,\n 'Internal server error')",
"async def get_by_email(self, email: str) -> Optional[UD]:\n user = await looped_fetch(\n self.async_deta_base.fetch, query={\"email\": email.lower()}\n )\n\n return self.user_db_model(**user) if user else None",
"def get_user_by_email(email):\n\n user = User.query.filter(User.email == email).first()\n \n return user",
"def get_user(current_user):\n for user in user_db:\n if user['email'] == current_user:\n return user",
"def get_user(self, email):\n\n try:\n return self.client.admin_get_user(\n Username=email,\n UserPoolId=self.user_pool_id\n )\n except self.client.exceptions.UserNotFoundException:\n raise Exception('An account with the given email does not exist.')",
"def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile",
"def get_user_profile(self):\n return self.user.profile",
"def get_user(self):\n try:\n return User.objects.get(id=self.user_id)\n except User.DoesNotExist:\n return AnonymousProfile()",
"def get_user_by_email(email):\n user = User.query.filter(User.email == email).first()\n result = userSchema.dump(user)\n return jsonify(result)",
"def get(\n user_id=None, discord_id=None, google_id=None, email=None,\n ):\n temp_cursor = user_db.cursor()\n\n pos_selectors = {\n \"user_id\": user_id,\n \"discord_id\": discord_id,\n \"google_id\": google_id,\n \"email\": email,\n }\n\n user = None\n for selector in pos_selectors.keys():\n sel_value = pos_selectors[selector]\n if sel_value is None:\n continue\n user = temp_cursor.execute(\n \"SELECT * FROM users WHERE \" + selector + \" = ?\", (sel_value,)\n ).fetchone()\n\n if user is not None:\n return User_Info.init_from_db(user)\n\n return None"
] | [
"0.74857944",
"0.7336757",
"0.7331344",
"0.7298807",
"0.72768545",
"0.72726756",
"0.71711785",
"0.7168781",
"0.7130947",
"0.71272796",
"0.70798576",
"0.70757645",
"0.7041153",
"0.6998165",
"0.6979574",
"0.6948253",
"0.69294655",
"0.69286436",
"0.69207954",
"0.6915467",
"0.6889324",
"0.6851269",
"0.6831796",
"0.68107396",
"0.6796974",
"0.67915887",
"0.67849123",
"0.6779606",
"0.67747545",
"0.6767703"
] | 0.8088444 | 0 |
Create user profile. Creates a new user profile from the user email, skills, prizes, and other fields. | def create_user_profile(email, **kwargs): # POST
user_exists = coll("users").find_one({"_id": email})
if user_exists:
return {"message": "User already exists"}, 400
# NOTE Doesn't make sense for a person to have prizes only a team should have this
coll("users").insert_one(
{
"_id": email,
"skills": kwargs["skills"],
"prizes": kwargs["prizes"],
"bio": kwargs["bio"],
"github": kwargs["github"],
"interests": kwargs["interests"],
"seriousness": kwargs["seriousness"],
"team_id": "",
"hasateam": False,
}
)
return {"message": "User profile successfully created"}, 201 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(self, data):\n # Make User\n username = data['email'].split(\"@\")[0]\n user = User.objects.create_user(**data, username=username, is_verified=False, is_client=True)\n Profile.objects.create(user=user)\n send_confirmation_email.delay(user_pk=user.pk)\n return user",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n profile = UserProfile()\n profile.user = instance\n profile.email=instance.email\n profile.save()",
"def create(self, validated_data):\n request = self.context.get('request')\n profile = Profile(**validated_data)\n profile.user = request.user\n profile.save()\n return profile",
"def profile_create(faker_obj=fake_init()):\n profile = faker_obj.simple_profile()\n user = User.objects.create(\n username=profile[\"username\"],\n email=profile[\"mail\"],\n password=profile[\"username\"][::-1],\n )\n return user.id",
"def create(self, validated_data):\r\n user_data = validated_data.pop('user')\r\n user = UserSerializer.create(UserSerializer(), validated_data = user_data)\r\n profile, created = Profile.objects.update_or_create(user = user,\r\n bio = validated_data.pop('bio'),\r\n location = validated_data.pop('location'),\r\n birth_date = validated_data.pop('birth_date'))\r\n return profile",
"def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n profile, created = Profile.objects.update_or_create(\n user=user,\n avatar=validated_data.pop('avatar'),\n biography=validated_data.pop('biography'),\n link=validated_data.pop('link') \n )\n return profile",
"def create (self, validated_data):\n user = models.UserProfile.objects.create_user(\n email = validated_data ['email'],\n name = validated_data ['name'],\n password = validated_data ['password']\n )\n\n return user",
"def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data['email'],\n first_name = validated_data['first_name'],\n last_name = validated_data['last_name'],\n password = validated_data['password']\n )\n return user",
"def create_user_profile(instance, created, **_):\n if created:\n Profile.objects.create(user=instance)",
"def create_profile(self, user, *args, **kwargs):\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key, **kwargs)",
"def create_profile_for_new_user(sender, created, instance, **kwargs):\n if created:\n profile = self.get_model('profile')(user=instance)\n profile.save()",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create(self, validated_data):\n user = UserProfile.objects.create_user(\n email=validated_data[\"email\"],\n name=validated_data[\"name\"],\n password=validated_data[\"password\"]\n )\n\n return user",
"def create(self, validated_data):\n user = models.UserProfile.objects.create_user(\n email=validated_data['email'],\n username=validated_data['username'],\n password=validated_data['password'],\n\n )\n\n return user",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n user_profile = UserProfile.objects.create(user=instance)",
"def create(self, validated_data):\n\n user = models.UserProfile(\n username=validated_data['username'],\n email=validated_data['email'],\n first_name=validated_data['first_name'],\n mobile_number=validated_data['mobile_number'],\n )\n\n user.set_password(validated_data['password'])\n user.save()\n\n return user",
"def createUserProfile(user):\n MyProfile.objects.get_or_create(user=user)",
"def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()",
"def create(self, validated_data):\n user = User.objects.create(\n first_name=validated_data.get('first_name'),\n middle_name=validated_data.get('middle_name'),\n last_name=validated_data.get('last_name'),\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n mobile_number=validated_data.get('mobile_number'),\n gender=validated_data.get('gender'),\n is_active=validated_data.get('is_active'),\n country=validated_data.get('country'),\n address=validated_data.get('address'),\n role=validated_data.get('role'),\n )\n if self.context['request'].data.get('file_profile_picture') is not None:\n user.profile_picture = self.context['request'].data['file_profile_picture']\n if self.context['request'].data.get('file_signature') is not None:\n user.signature = self.context['request'].data['file_signature']\n user.set_password(validated_data.get('password'))\n user.save()\n return user",
"def create_user(self, *args, **kwargs):\n user = User.objects.create_user(*args, **kwargs)\n return get_profile(user)",
"def create(self, validated_data):\n\n user = models.UserProfile(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n user.save()\n return user",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)",
"def create(self, validated_data):\n ## overriding default create\n\n user = UserProfile.objects.create_user(\n email = validated_data['email'],\n name = validated_data['name'],\n password=validated_data['password']\n )\n \n return user",
"def create_user_profile(sender, instance, created, **kwargs):\n\n if created:\n user_profile = UserProfile.objects.create(user=instance)",
"def profile(**kwargs):\n defaults = {'name': 'Test K. User', 'bio': 'Some bio.',\n 'website': 'http://support.mozilla.com',\n 'timezone': None, 'country': 'US', 'city': 'Mountain View',\n 'locale': 'en-US'}\n if 'user' not in kwargs:\n u = user(save=True)\n defaults['user'] = u\n defaults.update(kwargs)\n\n p = Profile(**defaults)\n p.save()\n return p",
"def create(self, validated_data):\n\n user = models.UserProfile(\n email=validated_data['email'],\n name=validated_data['name']\n )\n\n user.set_password(validated_data['password'])\n user.save()\n\n return user",
"def create_profile(self, user):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n return self.create(user=user,\n activation_key=activation_key)",
"def create_profile(self, user):\r\n salt = sha.new(str(random.random())).hexdigest()[:5]\r\n activation_key = sha.new(salt+user.username).hexdigest()\r\n return self.create(user=user,\r\n activation_key=activation_key)"
] | [
"0.75975007",
"0.74751395",
"0.7427103",
"0.73537993",
"0.7324293",
"0.7319972",
"0.7295748",
"0.7285775",
"0.7275217",
"0.7270615",
"0.7237489",
"0.72268796",
"0.72268796",
"0.72268796",
"0.7216276",
"0.7180537",
"0.716592",
"0.7164825",
"0.71644413",
"0.7159965",
"0.7144012",
"0.71275973",
"0.7117181",
"0.710969",
"0.7108435",
"0.7101867",
"0.7091569",
"0.7084894",
"0.7069145",
"0.70556456"
] | 0.8317838 | 0 |
Resolver should be able to produce a value for a given key. If the key doesn't exist, it should return None. | def resolve(self, key: str) -> Optional[Any]:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve(self, key: str) -> Optional[Any]:\n return self.dict.get(key)",
"def get(self, key: K)-> Optional[V]:\n return self._func(key)",
"def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False",
"def _get(self, key):\n try:\n val = getattr(self, f\"_{key}\")\n if val is not None:\n return val\n else:\n self._load()\n return getattr(self, f\"_{key}\")\n except AttributeError:\n return None",
"def get(self, key, fallback):\r\n try:\r\n return self[key]\r\n except (KeyError, IndexError):\r\n return fallback",
"def get_value(self, key):\r\n if self.hash_table[self.horner_hash(key)] is not None:\r\n if self.hash_table[self.horner_hash(key)].key == key:\r\n return self.hash_table[self.horner_hash(key)].value\r\n else:\r\n return None",
"def get(self, key: Union[Any, int]) -> Union[Any, Sequence[Any]]:\n try:\n return[key]\n except KeyError:\n return self.default_factory",
"def get_value(self, key: str) -> Optional[str]:\n raise NotImplementedError",
"def get(self, key: K) -> Optional[V]:\n return self.mget([key])[0]",
"def get_or_call(self, key, callback, ttl=None):\n if self.contains(key):\n res = self[key]\n else:\n res = callback()\n self.set(key, res, ttl=ttl)\n return res",
"def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None",
"def get_value(self, key: str) -> Any:\r\n if key is None:\r\n return self.data\r\n try:\r\n return self.data[key]\r\n except KeyError:\r\n return None",
"def _safe_read(self, source: dict, key: str, mapper: Callable[[str], any]) -> any:\n return mapper(source[key]) if key in source else None",
"def _resolve_with_default(\n self,\n key: Union[str, int, Enum],\n value: Any,\n default_value: Any = DEFAULT_VALUE_MARKER,\n ) -> Any:\n\n def is_mandatory_missing(val: Any) -> bool:\n return get_value_kind(val) == ValueKind.MANDATORY_MISSING # type: ignore\n\n value = _get_value(value)\n has_default = default_value is not DEFAULT_VALUE_MARKER\n if has_default and (value is None or is_mandatory_missing(value)):\n return default_value\n\n resolved = self._resolve_interpolation(\n key=key,\n value=value,\n throw_on_missing=not has_default,\n throw_on_resolution_failure=not has_default,\n )\n if resolved is None and has_default:\n return default_value\n\n if is_mandatory_missing(resolved):\n if has_default:\n return default_value\n else:\n raise MissingMandatoryValue(\"Missing mandatory value: $FULL_KEY\")\n\n return _get_value(resolved)",
"def lookup(self, key):",
"def _single_getitem(self, key):\n try:\n return self._dict[key]\n except KeyError:\n return self.default",
"def get(self, key: Hashable) -> Any: # type: ignore\n try:\n return[key]\n except (KeyError, TypeError):\n if self.default_factory is None:\n raise KeyError(f'{key} is not in {self.__class__}')\n else:\n try:\n return self.default_factory()\n except TypeError:\n return self.default_factory",
"def get(self, key, default=None):\n def find(found_item, _):\n \"\"\" This is the closer function which will be passed to find by key function , if key found than return the value \n otherwise return blanck\"\"\"\n if found_item:\n return found_item[1]\n else:\n return default\n\n return self._find_by_key(key, find)",
"def get(\n self,\n key: str,\n ) -> T.Optional[VALUE]:\n record = self._get_record_from_backend(key)\n if record is None:\n return None\n\n if record.expire:\n now = utc_now()\n if (now.timestamp() - record.update_ts) < record.expire:\n return self.deserialize(record.value)\n else:\n return None\n else:\n return self.deserialize(record.value)",
"def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None",
"def resolve_resolver_value(self, resolver: \"Resolver\") -> Any:\n try:\n return resolver.resolve()\n except RecursiveResolve:\n # Recursive resolve issues shouldn't be masked by a placeholder.\n raise\n except Exception:\n if are_placeholders_enabled():\n placeholder_value = create_placeholder_value(\n resolver, self.placeholder_type\n )\n\n self.logger.debug(\n \"Error encountered while resolving the resolver. This is allowed for the current \"\n f\"operation. Resolving it to a placeholder value instead: {placeholder_value}\"\n )\n return placeholder_value\n raise",
"def get(self, key):\n if self.defs:\n name = self.defs[0]\n val = self.defs[1]\n old_self = self.defs[2]\n if key == name:\n return val\n else:\n return old_self.get(key)",
"def get(self, key, default=None):\r\n try:\r\n return self.data[key]()\r\n except (KeyError, SleekRefDied):\r\n return default",
"def value(self, key):\n item = self.default(key)\n return self.__getSafeValue(key, item)",
"def get(self, key):\n if key in self.fields:\n return self.fields.get(key).get()\n return None",
"def resolve(self, key: str) -> Any:\n return _ba.resolve_appconfig_value(key)",
"def __getitem__(self, key):\n result = mongo['readable-api'].foo.find_one({\"foo\": key})\n if result:\n return self.make_child(key)\n return None",
"def get(self, key, default=None):",
"def resolve(self, section, key):\n\n return self.sections[section][key]",
"def get(self, key):\n if type(key) != str:\n raise TypeError(\"This is not the string you're looking for!\")\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n try:\n return self.bucket_list[number % self.bucket_number].search(stored_key).stored_value\n except AttributeError:\n return None"
] | [
"0.7909129",
"0.6813241",
"0.67681676",
"0.66341573",
"0.64758044",
"0.6463813",
"0.64607257",
"0.6392654",
"0.6385483",
"0.63703537",
"0.63212407",
"0.6292602",
"0.6284046",
"0.62764496",
"0.619645",
"0.6181006",
"0.617452",
"0.614984",
"0.6148691",
"0.6125853",
"0.61204463",
"0.611363",
"0.610535",
"0.6089134",
"0.60884",
"0.60846543",
"0.60740733",
"0.60708284",
"0.6038302",
"0.6037554"
] | 0.75538653 | 1 |
Resolver should be able to produce a value for a given key. If the key doesn't exist, it should return None. | def resolve(self, key: str) -> Optional[Any]:
return self.dict.get(key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resolve(self, key: str) -> Optional[Any]:\n pass",
"def get(self, key: K)-> Optional[V]:\n return self._func(key)",
"def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False",
"def _get(self, key):\n try:\n val = getattr(self, f\"_{key}\")\n if val is not None:\n return val\n else:\n self._load()\n return getattr(self, f\"_{key}\")\n except AttributeError:\n return None",
"def get(self, key, fallback):\r\n try:\r\n return self[key]\r\n except (KeyError, IndexError):\r\n return fallback",
"def get_value(self, key):\r\n if self.hash_table[self.horner_hash(key)] is not None:\r\n if self.hash_table[self.horner_hash(key)].key == key:\r\n return self.hash_table[self.horner_hash(key)].value\r\n else:\r\n return None",
"def get(self, key: Union[Any, int]) -> Union[Any, Sequence[Any]]:\n try:\n return[key]\n except KeyError:\n return self.default_factory",
"def get_value(self, key: str) -> Optional[str]:\n raise NotImplementedError",
"def get(self, key: K) -> Optional[V]:\n return self.mget([key])[0]",
"def get_or_call(self, key, callback, ttl=None):\n if self.contains(key):\n res = self[key]\n else:\n res = callback()\n self.set(key, res, ttl=ttl)\n return res",
"def get_if_exist(self, data, key):\n if key in data:\n return data[key]\n return None",
"def get_value(self, key: str) -> Any:\r\n if key is None:\r\n return self.data\r\n try:\r\n return self.data[key]\r\n except KeyError:\r\n return None",
"def _safe_read(self, source: dict, key: str, mapper: Callable[[str], any]) -> any:\n return mapper(source[key]) if key in source else None",
"def _resolve_with_default(\n self,\n key: Union[str, int, Enum],\n value: Any,\n default_value: Any = DEFAULT_VALUE_MARKER,\n ) -> Any:\n\n def is_mandatory_missing(val: Any) -> bool:\n return get_value_kind(val) == ValueKind.MANDATORY_MISSING # type: ignore\n\n value = _get_value(value)\n has_default = default_value is not DEFAULT_VALUE_MARKER\n if has_default and (value is None or is_mandatory_missing(value)):\n return default_value\n\n resolved = self._resolve_interpolation(\n key=key,\n value=value,\n throw_on_missing=not has_default,\n throw_on_resolution_failure=not has_default,\n )\n if resolved is None and has_default:\n return default_value\n\n if is_mandatory_missing(resolved):\n if has_default:\n return default_value\n else:\n raise MissingMandatoryValue(\"Missing mandatory value: $FULL_KEY\")\n\n return _get_value(resolved)",
"def lookup(self, key):",
"def _single_getitem(self, key):\n try:\n return self._dict[key]\n except KeyError:\n return self.default",
"def get(self, key: Hashable) -> Any: # type: ignore\n try:\n return[key]\n except (KeyError, TypeError):\n if self.default_factory is None:\n raise KeyError(f'{key} is not in {self.__class__}')\n else:\n try:\n return self.default_factory()\n except TypeError:\n return self.default_factory",
"def get(self, key, default=None):\n def find(found_item, _):\n \"\"\" This is the closer function which will be passed to find by key function , if key found than return the value \n otherwise return blanck\"\"\"\n if found_item:\n return found_item[1]\n else:\n return default\n\n return self._find_by_key(key, find)",
"def get(\n self,\n key: str,\n ) -> T.Optional[VALUE]:\n record = self._get_record_from_backend(key)\n if record is None:\n return None\n\n if record.expire:\n now = utc_now()\n if (now.timestamp() - record.update_ts) < record.expire:\n return self.deserialize(record.value)\n else:\n return None\n else:\n return self.deserialize(record.value)",
"def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None",
"def resolve_resolver_value(self, resolver: \"Resolver\") -> Any:\n try:\n return resolver.resolve()\n except RecursiveResolve:\n # Recursive resolve issues shouldn't be masked by a placeholder.\n raise\n except Exception:\n if are_placeholders_enabled():\n placeholder_value = create_placeholder_value(\n resolver, self.placeholder_type\n )\n\n self.logger.debug(\n \"Error encountered while resolving the resolver. This is allowed for the current \"\n f\"operation. Resolving it to a placeholder value instead: {placeholder_value}\"\n )\n return placeholder_value\n raise",
"def get(self, key):\n if self.defs:\n name = self.defs[0]\n val = self.defs[1]\n old_self = self.defs[2]\n if key == name:\n return val\n else:\n return old_self.get(key)",
"def get(self, key, default=None):\r\n try:\r\n return self.data[key]()\r\n except (KeyError, SleekRefDied):\r\n return default",
"def value(self, key):\n item = self.default(key)\n return self.__getSafeValue(key, item)",
"def get(self, key):\n if key in self.fields:\n return self.fields.get(key).get()\n return None",
"def resolve(self, key: str) -> Any:\n return _ba.resolve_appconfig_value(key)",
"def __getitem__(self, key):\n result = mongo['readable-api'].foo.find_one({\"foo\": key})\n if result:\n return self.make_child(key)\n return None",
"def get(self, key, default=None):",
"def resolve(self, section, key):\n\n return self.sections[section][key]",
"def get(self, key):\n if type(key) != str:\n raise TypeError(\"This is not the string you're looking for!\")\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n try:\n return self.bucket_list[number % self.bucket_number].search(stored_key).stored_value\n except AttributeError:\n return None"
] | [
"0.75538653",
"0.6813241",
"0.67681676",
"0.66341573",
"0.64758044",
"0.6463813",
"0.64607257",
"0.6392654",
"0.6385483",
"0.63703537",
"0.63212407",
"0.6292602",
"0.6284046",
"0.62764496",
"0.619645",
"0.6181006",
"0.617452",
"0.614984",
"0.6148691",
"0.6125853",
"0.61204463",
"0.611363",
"0.610535",
"0.6089134",
"0.60884",
"0.60846543",
"0.60740733",
"0.60708284",
"0.6038302",
"0.6037554"
] | 0.7909129 | 0 |
Determines the number of files each node will process in a scatter-gather environment | def number_of_files_per_node(files, number_of_nodes):
files_per_node = float(len(files))/float(number_of_nodes)
if files_per_node > 0.:
return int(math.floor(files_per_node))
else:
return int(math.ceil(files_per_node)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fileCount(self):\n pass",
"def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def numberFiles(self):\n return self.n",
"def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)",
"def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size",
"def getFileCount(self) -> int:\n ...",
"def num_partitions(self): # -> int:\n ...",
"def number_of_workers():\n return (cpu_count() * 2) + 1",
"def fileCounter(directory):",
"def __number_of_files(self):\n self.__get_files()\n return len(self.files)",
"def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n",
"def count_data_items(fileids, train=True):\n sizes = 28000 if train else 22500\n return len(fileids) * sizes",
"def totalfiles(self):\n return len([sz for sz in self.iterate()])",
"def num_partitions(self): # -> None:\n ...",
"def n_total_files(self):\n return len(self.fileinfo)",
"def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1",
"def get_num_chunks(self) -> int:",
"def get_number_files(dataset):\n HOME = os.environ['HOME']\n # cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json',\n # '--key=%s/.globus/userkey.pem' % HOME, '--cert=%s/.globus/usercert.pem' % HOME]\n cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json']\n output = subprocess.check_output(cmds, stderr=subprocess.STDOUT)\n summary_dict = json.loads(output)\n return int(summary_dict['data'][0]['summary'][0]['nfiles'])",
"def num_chunking_units(self):\n if self._source_paths:\n return len(self._source_paths)\n return 1",
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def num_dataload_workers() -> int:\n return 4 if common_util.is_linux() else 0",
"def num_processes():\n return 1",
"def part1():\n program = read_input()\n root = build_filesystem(program)\n all_sizes = root.make_size_list()\n return sum(size for size in all_sizes if size <= 100000)",
"def get_amount_of_data(directory: str):\n size = sum([os.path.getsize(os.path.join(directory, item)) for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(size)\n return size",
"def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n",
"def num_partitions(self): # -> Unknown:\n ...",
"def total_files_to_process(self) -> float:\n return pulumi.get(self, \"total_files_to_process\")",
"def getnrfiles(self):\n return len(self.filenames)",
"async def num_fomod_files_to_install(self):\n n = 0\n for f in self.fomod.files_to_install:\n if f.type == \"folder\":\n n += await self.count_folder_contents(f.source)\n else:\n n += 1\n\n return n",
"def getNumStatDataFiles(self):\n return self.nStatDataFiles"
] | [
"0.6732709",
"0.6647272",
"0.6646703",
"0.65596217",
"0.655924",
"0.6545861",
"0.65446675",
"0.6479833",
"0.64772725",
"0.6464439",
"0.6455599",
"0.6446589",
"0.6436676",
"0.641191",
"0.6406125",
"0.6391951",
"0.636867",
"0.6327254",
"0.63119394",
"0.63079107",
"0.62990075",
"0.62850386",
"0.6271203",
"0.6271065",
"0.6241946",
"0.6241917",
"0.6211973",
"0.62052494",
"0.6188796",
"0.61354184"
] | 0.7106992 | 0 |
Send a request to Slack and validate the response | def slack_request(url: str, headers: dict, data: dict) -> dict:
logger.debug(f'\nSending request to Slack API using {url}')
response = requests.post(url=url,
headers=headers,
data=data)
if response.status_code != 200:
        logger.error(f'Got status {response.status_code} while trying to post to the slack url {url}.')
        # todo: check for error details; since their response format is not always consistent, converting to json
# doesn't work all the time.
#data = response.json()
#if not data['ok']:
# logger.error(f"Got the following errors back from slack: {data}")
return response | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def __call(self, headers, method, data):\n url = 'https://slack.com/api/'+method\n req = requests.post(\n url=url,\n data=data,\n headers=headers\n )\n return req",
"def slack_it(request):\n # Validate the Boon AI JWT\n jwt_valid = True\n encoded_jwt = request.headers.get('X-BoonAI-Signature-256').encode('utf-8')\n try:\n jwt.decode(encoded_jwt, os.environ['SECRET'], algorithms=[\"HS256\"])\n except jwt.InvalidSignatureError:\n jwt_valid = False\n\n # Send a slack message with the payload information.\n body = {\n \"text\": \"Webhook received from Boon AI\",\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"plain_text\",\n \"text\": \"Webhook received from Boon AI\",\n \"emoji\": True\n }\n },\n {\n \"type\": \"divider\"\n },\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"*JWT Validated*: {jwt_valid}\\n \"\n f\"*JWT*: {request.headers.get('X-BoonAI-Signature-256')}\\n \"\n f\"*Content-Type*: {request.content_type}\\n \"\n f\"*Webhook Payload*\\n```{pprint.pformat(request.get_json(force=True))}```\"\n }\n }\n ]\n }\n requests.post(os.environ['SLACK_URL'], json=body)\n\n return {}",
"def post(self):\n send_slack_log('Entered /slack/submit')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n if request.form.get('payload') is None:\n send_slack_log('Invalid request: no payload')\n return\n else:\n return handle_interaction(json.loads(request.form['payload']))",
"def hears(request):\n\n #Wit makes our responses timeout, so we ignore Slack retries\n if \"HTTP_X_SLACK_RETRY_NUM\" in request.META:\n return HttpResponse(\"OK\", 200)\n\n slack_event = json.loads(request.body)\n\n # ============= Slack URL Verification ============ #\n # In order to verify the url of our endpoint, Slack will send a challenge\n # token in a request and check for this token in the response our endpoint\n # sends back.\n # For more info: https://api.slack.com/events/url_verification\n if \"challenge\" in slack_event:\n return HttpResponse(slack_event[\"challenge\"], 200)\n #removed {\"content_type\":\"application/json\"} from flask response\n\n # ============ Slack Token Verification =========== #\n # We can verify the request is coming from Slack by checking that the\n # verification token in the request matches our app's settings\n if pyBot.verification != slack_event.get(\"token\"):\n print \"Invalid Slack verification token: %s \\npyBot has: \\\n %s\\n\\n\" % (slack_event[\"token\"], pyBot.verification)\n # By adding \"X-Slack-No-Retry\" : 1 to our response headers, we turn off\n # Slack's automatic retries during development.\n return HttpResponse(message, 403)\n\n # ====== Process Incoming Events from Slack ======= #\n # If the incoming request is an Event we've subcribed to\n if \"event\" in slack_event:\n event_type = slack_event[\"event\"][\"type\"]\n # Then handle the event by event_type and have your bot respond\n return _event_handler(event_type, slack_event)\n\n # If our bot hears things that are not events we've subscribed to,\n # send a quirky but helpful error response\n return HttpResponse(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404)",
"def request_slack(api_method, params):\n url = BASE_URL + api_method\n response = requests.get(url, params=params)\n if response.status_code != 200:\n raise RuntimeError('Issue connecting to Slack API!')\n decoded_response = json.loads(response.text)\n if not decoded_response['ok']:\n raise RuntimeError('Issue pulling data from Slack API!')\n return decoded_response",
"def is_request_valid(request: request) -> bool:\n \n key = os.environ.get(\"SLACK_SIGNING_SECRET\")\n basestring = 'v0:' + request.headers['X-Slack-Request-Timestamp'] + ':' + str(request.get_data(), 'utf-8')\n\n # Hash the basestring using the signing secret as the key in order to get the signature\n signature = 'v0=' + hmac.new(\n bytes(key, 'utf-8'),\n bytes(basestring, 'utf-8'),\n hashlib.sha256\n ).hexdigest()\n slacksig = request.headers['X-Slack-Signature']\n\n # If the signature is equal to the signature sent by slack, then it is indeed from slack.\n return hmac.compare_digest(slacksig, signature)",
"def save_slack_token(request):\n logger.debug(\"Slack callback just landed\")\n\n error = request.query_params.get('error', False)\n error_description = request.query_params.get('error_description', '')\n if error:\n raise APIException(\"Slack: \" + error_description)\n\n original_payload = request.query_params.get('payload', None)\n payload = request.query_params.get('payload', None)\n if payload is None:\n raise ValidationError(\"No payload specified\")\n else:\n try:\n payload = base64.b64decode(payload).decode(\"utf-8\")\n payload = parse_qs(payload)\n except:\n raise ValidationError(\"Cannot decode payload in base64\")\n\n if \"url\" not in payload:\n logger.exception(payload)\n raise ValidationError(\"No url specified from the slack payload\")\n\n if \"user\" not in payload:\n logger.exception(payload)\n raise ValidationError(\"No user id specified from the slack payload\")\n\n if \"a\" not in payload:\n logger.exception(payload)\n raise ValidationError(\"No academy id specified from the slack payload\")\n\n try:\n academy = Academy.objects.get(id=payload[\"a\"][0])\n except Exception as e:\n raise ValidationError(\"Not exist academy with that id\") from e\n\n user = None\n try:\n user = User.objects.get(id=payload[\"user\"][0])\n except Exception as e:\n raise ValidationError(\"Not exist user with that id\") from e\n\n code = request.query_params.get('code', None)\n if code is None:\n raise ValidationError(\"No slack code specified\")\n\n params = {\n 'client_id': os.getenv('SLACK_CLIENT_ID', \"\"),\n 'client_secret': os.getenv('SLACK_SECRET', \"\"),\n 'redirect_uri': os.getenv('SLACK_REDIRECT_URL', \"\")+\"?payload=\"+original_payload,\n 'code': code,\n }\n # print(\"params\", params)\n resp = requests.post('https://slack.com/api/oauth.v2.access', data=params)\n if resp.status_code == 200:\n\n logger.debug(\"Slack responded with 200\")\n\n slack_data = resp.json()\n if 'access_token' not in slack_data:\n print(\"Slack response body\", slack_data)\n raise APIException(\"Slack error status: \"+slack_data['error'])\n\n slack_data = resp.json()\n logger.debug(slack_data)\n\n # delete all previous credentials for the same team and cohort\n CredentialsSlack.objects.filter(\n app_id=slack_data['app_id'], team_id=slack_data['team']['id'], user__id=user.id).delete()\n credentials = CredentialsSlack(\n user=user,\n app_id=slack_data['app_id'],\n bot_user_id=slack_data['bot_user_id'],\n token=slack_data['access_token'],\n team_id=slack_data['team']['id'],\n team_name=slack_data['team']['name'],\n authed_user=slack_data['authed_user']['id'],\n )\n credentials.save()\n\n team = SlackTeam.objects.filter(\n academy__id=academy.id, slack_id=slack_data['team']['id']).first()\n if team is None:\n team = SlackTeam(\n slack_id=slack_data['team']['id'],\n owner=user,\n academy=academy\n )\n\n team.name = slack_data['team']['name']\n team.save()\n\n return HttpResponseRedirect(redirect_to=payload[\"url\"][0])",
"def test_validation(self):\n challenge = \"challenge-string\"\n data = {\n 'hub.mode': 'subscribe',\n 'hub.verify_token': settings.VERIFY_TOKEN,\n 'hub.challenge': challenge\n }\n c = Client()\n response = c.get(self.webhook, data=data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.content, 'utf-8'), challenge)",
"def test_slackP_send(get_slackpost, capsys):\n s = get_slackpost\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def slack_callback(request):\n client_id = slack_access_keys[\"client_id\"]\n client_secret = slack_access_keys[\"client_secret\"]\n\n if request.method == 'GET':\n code = request.GET.get('code')\n get_token_url = \"https://slack.com/api/oauth.access?client_id={}&client_secret={}&code={}\".format(client_id,\n client_secret,\n code)\n r = requests.post(get_token_url,\n auth=HTTPBasicAuth(client_id, client_secret),\n headers={\"content-type\": \"application/x-www-form-urlencoded\"},\n params={\"code\": code, \"grant_type\": \"authorization_code\"})\n\n try:\n access_token = r.json()['access_token']\n\n get_activity_url = \"https://slack.com/api/users.identity\"\n r = requests.post(get_activity_url,\n headers={\"Authorization\": \"Bearer \" + access_token})\n return r.json()\n except Exception as e:\n # Authorization failed.\n return None",
"def send_message_to_slack(text):\n\n try:\n post = {\n \"text\": \":fire: :sad_parrot: *SSL Certificate BACKUP SCRIPT Status for HTTPD Proxy:* :sad_parrot: :fire:\",\n \"attachments\": [\n {\n \"text\": \"{0}\".format(text),\n \"color\": \"#B22222\",\n \"attachment_type\": \"default\",\n \"fields\": [\n {\n \"title\": \"Priority\",\n \"value\": \"High\",\n \"short\": \"false\"\n }\n ],\n \"footer\": \"AWS HTTPD\",\n \"footer_icon\": \"https://platform.slack-edge.com/img/default_application_icon.png\"\n }\n ]\n }\n\n ssm_param_name = 'slack_notification_webhook'\n ssm = boto3.client('ssm', config=CONFIG, region_name='eu-west-2')\n try:\n response = ssm.get_parameter(\n Name=ssm_param_name, WithDecryption=True)\n except ClientError as e:\n if e.response['Error']['Code'] == 'ParameterNotFound':\n LOGGER.info(\n 'Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n else:\n logging.error(\n \"Unexpected error when attempting to get Slack webhook URL: %s\", e)\n return\n if 'Value' in response['Parameter']:\n url = response['Parameter']['Value']\n\n json_data = json.dumps(post)\n req = urllib.request.Request(\n url,\n data=json_data.encode('ascii'),\n headers={'Content-Type': 'application/json'})\n LOGGER.info('Sending notification to Slack')\n response = urllib.request.urlopen(req)\n\n else:\n LOGGER.info(\n 'Value for Slack SSM parameter %s not found. No notification sent', ssm_param_name)\n return\n\n except Exception as err:\n logging.error(\n 'The following error has occurred on line: %s',\n sys.exc_info()[2].tb_lineno)\n logging.error(str(err))",
"def slack_me(msg):\n # sanitise.\n msg = unicodedata.normalize('NFKD',msg).encode('ascii','ignore').decode('ascii')\n msg = re.sub('[^\\w\\s\\-.,;?!@#()\\[\\]]','', msg)\n r = requests.post(url=os.environ['SLACK_WEBHOOK'],\n headers={'Content-type': 'application/json'},\n data=f\"{{'text': '{msg}'}}\")\n if r.status_code == 200 and r.content == b'ok':\n return True\n else:\n return False",
"def send(self):\n payload = self.format_payload()\n\n # Makes sure that the required fields are provided before\n # sending the payload.\n if not self.webhook_url:\n print ('Error: Webhook URL is required.')\n\n elif not payload:\n print ('Error: Message payload cannot be empty.')\n\n else:\n try:\n request = requests.post(self.webhook_url,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n\n request.raise_for_status()\n\n except requests.exceptions.RequestException as error:\n print('Error: %s' % error)",
"def _perform_http_request(\n self, *, body: Dict[str, any], headers: Dict[str, str]\n ) -> WebhookResponse:\n body = json.dumps(body)\n headers[\"Content-Type\"] = \"application/json;charset=utf-8\"\n\n if self.logger.level <= logging.DEBUG:\n self.logger.debug(\n f\"Sending a request - url: {self.url}, body: {body}, headers: {headers}\"\n )\n try:\n url = self.url\n opener: Optional[OpenerDirector] = None\n # for security (BAN-B310)\n if url.lower().startswith(\"http\"):\n req = Request(\n method=\"POST\", url=url, data=body.encode(\"utf-8\"), headers=headers\n )\n if self.proxy is not None:\n if isinstance(self.proxy, str):\n opener = urllib.request.build_opener(\n ProxyHandler({\"http\": self.proxy, \"https\": self.proxy}),\n HTTPSHandler(context=self.ssl),\n )\n else:\n raise SlackRequestError(\n f\"Invalid proxy detected: {self.proxy} must be a str value\"\n )\n else:\n raise SlackRequestError(f\"Invalid URL detected: {url}\")\n\n # NOTE: BAN-B310 is already checked above\n resp: Optional[HTTPResponse] = None\n if opener:\n resp = opener.open(req, timeout=self.timeout) # skipcq: BAN-B310\n else:\n resp = urlopen( # skipcq: BAN-B310\n req, context=self.ssl, timeout=self.timeout\n )\n charset: str = resp.headers.get_content_charset() or \"utf-8\"\n response_body: str = resp.read().decode(charset)\n resp = WebhookResponse(\n url=url,\n status_code=resp.status,\n body=response_body,\n headers=resp.headers,\n )\n _debug_log_response(self.logger, resp)\n return resp\n\n except HTTPError as e:\n # read the response body here\n charset = e.headers.get_content_charset() or \"utf-8\"\n body: str = e.read().decode(charset)\n resp = WebhookResponse(\n url=url,\n status_code=e.code,\n body=body,\n headers=e.headers,\n )\n if e.code == 429:\n # for backward-compatibility with WebClient (v.2.5.0 or older)\n resp.headers[\"Retry-After\"] = resp.headers[\"retry-after\"]\n _debug_log_response(self.logger, resp)\n return resp\n\n except Exception as err:\n self.logger.error(f\"Failed to send a request to Slack API server: {err}\")\n raise err",
"def __notify_slack(self):\n\t\ttry:\n\t\t\tprint(\"[+] Sending Slack notifications...\")\n\t\t\tslack_http_headers = {\n\t\t\t\t'User-Agent': 'GitHubScrap',\n\t\t\t\t'Content-type': 'application/json',\n\t\t\t}\n\t\t\tslack_http_data = {}\n\t\t\tfor ix in range(0,len(self.final_results[\"results\"]),SLACK_CHUNK_SIZE):\n\t\t\t\tdata_to_send = \"\"\n\t\t\t\tchunk_results = self.final_results[\"results\"][ix:ix+SLACK_CHUNK_SIZE]\n\t\t\t\tfor url in chunk_results:\n\t\t\t\t\tdata_to_send += \"{} ({})\\n\".format(url[\"query\"], url[\"link\"])\n\n\t\t\t\tslack_http_data.update({\n\t\t\t\t\t'text': data_to_send,\n\t\t\t\t})\n\t\t\t\trequests.post(\n\t\t\t\t\tself.slack_webhook,\n\t\t\t\t\theaders = slack_http_headers,\n\t\t\t\t\tdata = json.dumps(slack_http_data),\n\t\t\t\t)\n\t\t\t\tsleep(SLACK_HTTP_DELAY)\n\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Slack notifications could not be sent', exception)",
"def slash_command():\n form_text = request.form[\"text\"]\n \n if len(form_text) > 0:\n data = {\n \"response_type\": \"in_channel\",\n \"text\": \"My response\",\n }\n else:\n \"\"\"\n If the user didn't type a message send a note that only\n they see about typing a message\n \"\"\"\n data = {\n \"response_type\": \"ephemeral\",\n \"text\": \"Error: No status message entered. Please try again.\",\n }\n\n \"\"\"\n Create the response object to send to Mattermost with the\n data object written as json, 200 status, and proper mimetype\n \"\"\"\n response = app.response_class(\n response=json.dumps(data),\n status=200,\n mimetype='application/json'\n )\n return response",
"def test_bot_message():\n send_json_message_to_bot(request.get_json())\n return \"ok\"",
"def flask_slack_test():\n _log('@channel: slack is working?')\n return 'slack test'",
"def do_GET(self): # pylint: disable=invalid-name\n parsed_url = urlparse(self.path)\n parsed_query = parse_qs(parsed_url.query)\n\n helper.log_info(f'Incoming request from {self.client_address[0]} - {self.path}')\n\n # Strava webhook expects a reply with the hub.challenge parameter\n challenge = parsed_query['hub.challenge'][0]\n request_verify_token = parsed_query['hub.verify_token'][0]\n\n # Respond with hub.challenge parameter if verify_token is correct\n if request_verify_token == verify_token:\n self.write_response(200, {\"hub.challenge\": challenge})\n else:\n self.write_empty_response(400)",
"async def send_response(\n self, response_url: Optional[str] = None, **kwargs: Optional[Any]\n ):\n req_args = dict(\n # contents of messenger[UserDict]\n **self,\n # any other API fields\n **kwargs,\n )\n\n api_url = response_url or self.response_url\n\n res = await self.client._request( # noqa\n http_verb=\"POST\", api_url=api_url, req_args=dict(json=req_args)\n )\n\n status = res[\"status_code\"]\n\n if status != 200:\n raise SlackApiError(\n message=\"Failed to send response_url: {}: status={}\".format(\n api_url, status\n ),\n response=res,\n )\n\n return True",
"def send_slack_notification(url: str, title: str, message: str):\n\n content = {\n \"text\": f\"{title}\",\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"{message}\",\n },\n }\n ],\n }\n\n response = requests.post(url, json=content)\n\n # Raise exception if response is not 200\n response.raise_for_status()",
"def webhook_sender(url=WEBHOOK_URL):\n data = runner()\n print(json.dumps(data))\n try:\n r = requests.post(url,json=data)\n print(r)\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)",
"def get_slack_token(request):\n url = request.query_params.get('url', None)\n if url is None:\n raise ValidationError(\"No callback URL specified\")\n\n user_id = request.query_params.get('user', None)\n if user_id is None:\n raise ValidationError(\"No user specified on the URL\")\n\n academy = request.query_params.get('a', None)\n if academy is None:\n raise ValidationError(\"No academy specified on the URL\")\n\n url = base64.b64decode(url).decode(\"utf-8\")\n # Missing scopes!! admin.invites:write, identify\n scopes = (\"app_mentions:read\", \"channels:history\", \"channels:join\", \"channels:read\",\n \"chat:write\", \"chat:write.customize\", \"commands\", \"files:read\", \"files:write\",\n \"groups:history\", \"groups:read\", \"groups:write\", \"incoming-webhook\", \"team:read\",\n \"users:read\", \"users:read.email\", \"users.profile:read\", \"users:read\")\n\n query_string = f'a={academy}&url={url}&user={user_id}'.encode(\"utf-8\")\n payload = str(base64.urlsafe_b64encode(query_string), \"utf-8\")\n params = {\n \"client_id\": os.getenv('SLACK_CLIENT_ID', \"\"),\n \"redirect_uri\": os.getenv('SLACK_REDIRECT_URL', \"\")+\"?payload=\"+payload,\n \"scope\": \",\".join(scopes)\n }\n redirect = \"https://slack.com/oauth/v2/authorize?\"\n for key in params:\n redirect += f\"{key}={params[key]}&\"\n\n if settings.DEBUG:\n return HttpResponse(f\"Redirect to: <a href='{redirect}'>{redirect}</a>\")\n else:\n return HttpResponseRedirect(redirect_to=redirect)",
"def send(data, webhook_url):\n dis_data = data\n url = webhook_url\n headers = {\"Content-Type\": \"application/json\"}\n discord_request = requests.post(url, data=json.dumps(dis_data), headers=headers)\n\n try:\n discord_request.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(discord_request.status_code))",
"def _send_request(self, url, text=None, params=None):\n if params is not None:\n for k, v in params.items():\n params[k] = v.encode(\"utf-8\")\n else:\n params = {}\n\n params['email'] = self._username\n\n if self._password:\n params['pass'] = self._password\n\n if self._hash:\n params['hash'] = self._hash\n\n if text is not None:\n params['s'] = self._stripslashes(text)\n\n\n try:\n response = requests.post(url, data=params)\n except Exception as e:\n print(str(e))\n\n result = response.content.decode('utf-8')\n \n\n try:\n json_data = json.loads(result)\n except ValueError as e:\n print(str(e))\n\n if json_data['status'] == \"Success\":\n return json_data\n elif json_data['status'] == \"Failure\":\n if json_data['error'].startswith(\"Error Authenticating.\"):\n print(json_data['error'])\n else:\n print(json_data['error'])\n else:\n print(json_data)",
"def slack(message):\n slack_hook = 'https://hooks.slack.com/services/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'\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n requests.post(slack_hook, json.dumps({'text': message}), headers=headers)",
"def slackMessage(binState):\n log = logging.getLogger('iob')\n\n if binState:\n location = \"Out\"\n else:\n location = \"In\"\n \n url = \"https://hooks.slack.com/services/{}\"\n \n payload = {\"text\": \"Bin is: {}\".format(location)}\n\n headers = {\"Content-Type\": \"application/json\"}\n\n response = requests.request(\n \"POST\",\n url,\n data=json.dumps(payload),\n headers=headers\n )\n\n log.debug(response.text)\n return",
"def send_request(url, user, passwd, payload):\n response = requests.post(url,\n data=json.dumps(payload),\n auth=(user, passwd),\n verify=False,\n timeout=30)\n\n if response.status_code != 200:\n print(\"Status code {}\".format(response.status_code))\n return ERR_STATUS_CODE\n\n try:\n print(json.dumps(response.json(), indent = 4, sort_keys=True))\n except ValueError:\n print(\"{}\".format(response.text))\n return ERR_WRONG_JSON\n\n return SUCCESS",
"def slackbuild_webhook(req: Request):\n global config\n global slack\n global cloudbuild\n\n # slack submits a POST\n if req.method != \"POST\":\n return abort(405)\n\n # not a true request from slack\n verified, err = slack.verify_webhook(req)\n if not verified:\n print(err)\n return abort(403)\n\n body = Slack.parse_request(req)\n argv = Slack.parse_command(body)\n msg = \"\"\n\n output, success = Command.run(argv, cloudbuild, config)\n\n if output is None:\n if success:\n # intentionaly not responding with a slack message\n return ('', 200)\n else:\n return abort(500)\n elif Slack.is_interactive_message(body):\n msg = slack.render_interactive_message(body, success, output)\n else:\n color = Colors.SUCCESS if success else Colors.FAILURE\n msg = slack.render_message({\"result\": output, \"color\": color}, \"command.json\")\n\n msg = json.dumps(msg)\n print(msg)\n return Response(response=msg, content_type=\"application/json\")"
] | [
"0.67223775",
"0.66398156",
"0.6534951",
"0.6358252",
"0.6333655",
"0.62352383",
"0.6171377",
"0.6148408",
"0.6085096",
"0.6081135",
"0.60650325",
"0.6040874",
"0.6038498",
"0.60302174",
"0.59917426",
"0.59649956",
"0.59637076",
"0.5943963",
"0.59014314",
"0.58998",
"0.5889386",
"0.5874803",
"0.5823254",
"0.57881576",
"0.5778226",
"0.57659745",
"0.5765907",
"0.5757712",
"0.57409817",
"0.573228"
] | 0.72009796 | 0 |
Create a report about stale branches for a list of repositories. | def check_stale_branches(event: dict, context) -> dict:
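    # Load Jira, GitHub and Slack settings used by this check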
ssm_parameters = load_params('dev_tools', 'dev')
if 'jira_statuses_for_task_completion' in ssm_parameters and ssm_parameters['jira_statuses_for_task_completion']:
jira_statuses_for_task_completion = ssm_parameters['jira_statuses_for_task_completion']
else:
jira_statuses_for_task_completion = ('Resolved', 'Closed')
repository_names = ssm_parameters['github_repository_names']
github_repository_names = repository_names.split(',')
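    # Jira authentication setup (OAuth)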
jira_oauth_dict = {
'access_token': ssm_parameters['jira_access_token'],
'access_token_secret': ssm_parameters['jira_access_token_secret'],
'consumer_key': ssm_parameters['jira_consumer_key'],
'key_cert': ssm_parameters['jira_private_key']
}
auth_jira = JIRA(ssm_parameters['jira_url'], oauth=jira_oauth_dict)
# Github authentication setup
g = Github(ssm_parameters['github_access_token'])
# Look for stale branches for all the specified repos
total_stale_branches = 0
general_report = ''
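    # number of stale branches found per commit author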
author_count = defaultdict(int)
for repo_name in github_repository_names:
logger.debug(f'\nChecking repo: {repo_name}')
try:
repo = g.get_repo(f"{ssm_parameters['github_account']}/{repo_name}")
except GithubException:
logger.error(f"Github repository '{ssm_parameters['github_account']}/{repo_name}' not found!")
continue
repo_report = ''
# confirm the name for the main develop branch
main_develop_branch = 'develop'
try:
_ = repo.get_branch('develop')
except GithubException:
main_develop_branch = 'master'
            logger.debug('Develop branch not found, using master as the main develop branch.')
branches = repo.get_branches()
for branch in branches:
# only check feature and hotfix branches
if not branch.name.startswith('feature/') and not branch.name.startswith('hotfix/'):
continue
# compare the branch against the main develop branch
try:
comparison = repo.compare(main_develop_branch, branch.name)
except GithubException as error:
logger.error(f'GithubException: Error while trying to compare {main_develop_branch} and {branch.name}.')
                logger.error(f'GithubException: {error}.')
                # the comparison failed, so skip this branch
                continue
if comparison.behind_by == 0:
# the branch is up to date, nothing to do
continue
# try to get the jira ticket number from the branch name
            ticket = None
            issue = None
result = re.search(r'feature/(?P<ticket>[a-zA-Z]+-[0-9]+).*', branch.name)
if result:
ticket = result.groupdict()['ticket'].upper()
try:
issue = auth_jira.issue(ticket)
except jira_exceptions.JIRAError:
logger.debug(f"The ticket {ticket} specified in the branch name doesn't exist in Jira.")
if issue and issue.fields.status.name not in jira_statuses_for_task_completion:
# the issue hasn't been marked as resolved in jira, so the branch may still be needed
continue
author = branch.commit.author.login if branch.commit.author else 'unknown'
author_count[author] += 1
repo_report += f'Branch: {branch.name}\nComparison status: {comparison.status}\nAuthor: {author}\n'
            if ticket and issue:
                repo_report += f'Ticket status: "{issue.fields.status.name}"\n'
repo_report += '\n'
total_stale_branches += 1
if repo_report:
general_report += f'Repo: {repo_name}, develop branch name: {main_develop_branch}\n{repo_report}'
if total_stale_branches:
count_by_author = ''
for author, count in sorted(author_count.items(), key=operator.itemgetter(1), reverse=True):
count_by_author += f'{author}: {count}\n'
report_overview = f'Current number of stale branches: {total_stale_branches}\n\n'\
f'Count by author:\n{count_by_author}\n'
report_details = f'Details:\n\n{general_report}'
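        # post the overview to the Slack webhook, then upload the detailed report as a file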
_ = slack_request(url=ssm_parameters['slack_webhook_url'],
headers={'Content-type': 'application/json',
'Authorization': f"Bearer {ssm_parameters['slack_access_token']}"},
data=json.dumps({'text': report_overview})
)
_ = slack_request(url='https://slack.com/api/files.upload',
headers={'Content-type': 'application/x-www-form-urlencoded'},
data={'token': ssm_parameters['slack_access_token'],
'channels': 'GE8NS0FT5',
'content': report_details,
'title': 'Stale branches details'}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stale_pr_branches(config, args):\n repo = config.repo\n for pr in repo.pull_requests(state=\"closed\"):\n if pr.head.repo == pr.base.repo and repo.branch(pr.head.ref):\n yield {\n \"html_url\": pr.html_url,\n \"base_branch\": pr.base.ref,\n \"head_branch\": pr.head.ref,\n }",
"def ensure_tracking_branches(args):\n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n branch_missing = repo.command(\n [\"rev-parse\", \"--verify\", \"-q\", project.refspec],\n capture_stdout=True)\n \n if branch_missing:\n logging.warn(\"Branch %s does not exist in project %s. checking out.\" %\n (project.refspec, name))\n repo.command([\"branch\", \"--track\",\n project.tracking_branch, project.remote_refspec])",
"def get_updated_all_commits(self, old_commits): # pylint: disable=too-many-locals\n\n # get new list of branches\n newest_branches_names = [branch_info['name'] for branch_info in self.get_branches()]\n\n # get old list of branches from old metadata\n old_branches_names = list(old_commits['metadata'].keys())\n\n # get old metadata\n old_commits_metadata = old_commits['metadata']\n result = {}\n\n # delete all items in old metadata where branch name is not exist in new list of branches\n for old_branch_name in old_branches_names:\n if not newest_branches_names.count(old_branch_name):\n old_commits_metadata.pop(old_branch_name)\n\n checked_commits_metadata = old_commits_metadata\n # add to checked_commits_metadata all metadata that is not exist in old_commits_metadata\n for branch in newest_branches_names:\n if not old_branches_names.count(branch):\n checked_commits_metadata[branch] = None\n\n # get dict of old commits with key - hash of commit for further mapping by branch\n repo_commits = {commit['hash']: commit for commit in old_commits['data']}\n\n # get list of new commits from all branches in repository\n for branch_name, newest_commit in checked_commits_metadata.copy().items():\n updated_list_of_branch_commits = \\\n self.get_updated_commits_by_branch(branch_name, newest_commit, only_new=True)\n if updated_list_of_branch_commits is None:\n return None\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in updated_list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch_name)\n else:\n commit_in_branch['branches'] = [branch_name]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n\n # add new metadata to method response for further updates by get_updated_all_commits\n if updated_list_of_branch_commits:\n checked_commits_metadata[branch_name] = updated_list_of_branch_commits[0]\n else:\n # if given old commit is the newest - add it to new metadata. P.S unnecessary ???\n checked_commits_metadata[branch_name] = newest_commit[0]\n\n updated_list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n updated_sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'],\n reverse=True)\n\n result['data'] = updated_sorted_commits\n result['metadata'] = checked_commits_metadata\n\n return result",
"def get_commits(self):\n\n repo_commits = {}\n\n # gets all branches in repository\n branches = self.get_branches()\n if branches is None:\n return None\n\n # get list of commits pages from all branches in repository\n for branch in branches:\n list_of_branch_commits = self.get_commits_by_branch(branch['name'])\n if list_of_branch_commits is None:\n return None\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch['name'])\n else:\n commit_in_branch['branches'] = [branch['name']]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'], reverse=True)\n\n # forms a list of commits as an 'get commits API' response\n commits_amount = 30 if len(sorted_commits) >= 30 else len(sorted_commits)\n result_list = sorted_commits[:commits_amount]\n\n return result_list",
"def force_pr_branch_stale_review_dismissal(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n prb = get_pr_branch(repo, branches)\n if prb:\n return _set_dismiss_stale_approvals(prb, True)\n else:\n return []",
"def ticket_branches(self, msrp, cred_hash):\n branches = []\n repos = self.get_repos()\n if not repos['status']:\n return repos\n\n for repo in repos['data']:\n response = self.find_branch(repo_name=repo['name'], msrp=msrp, cred_hash=cred_hash)\n if response['status']:\n branches.append({'repo': repo['name'], 'branches': response['data'], 'all': response['all']})\n\n if len(branches) > 0:\n return {'status': True, 'data': branches}\n else:\n return {'status': False, 'data': f'No branches found with MSRP {msrp}'}",
"def get_commits(self):\n\n repo_commits = {}\n\n # gets all branches in repository\n branches = self.get_branches()\n if branches is None:\n raise BitbucketRequestSenderExc('Can\\'t get branches for get_commits method')\n\n # get list of commits pages from all branches in repository\n for branch in branches:\n list_of_branch_commits = self.get_commits_by_branch(branch['name'])\n if list_of_branch_commits is None:\n raise BitbucketRequestSenderExc(\n 'Can\\'t get commits by branch for get_commits method')\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch['name'])\n else:\n commit_in_branch['branches'] = [branch['name']]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'], reverse=True)\n\n # forms a list of commits as an 'get commits API' response\n commits_amount = 30 if len(sorted_commits) >= 30 else len(sorted_commits)\n result_list = sorted_commits[:commits_amount]\n\n return result_list",
"def fetch_branches(self):\n for jrepo in self.json_repos['repos']:\n title = str(jrepo[\"title\"])\n self.branches[title] = str(jrepo['current'])",
"async def fetch_commits(self):\n for repo in self.config['repos'].split(','):\n since = datetime.min\n async for msg in self.channel.history(limit=None):\n if not msg.embeds:\n continue\n e = msg.embeds[0]\n if e.title == 'github commit' and e.timestamp and repo in e.description: # type: ignore\n since = e.timestamp\n break\n \n await self.update_commit_activity(repo, since)",
"def get_all_commits(self):\n\n repo_commits = {}\n metadata = {}\n\n # gets all branches in repository\n branches = self.get_branches()\n if branches is None:\n return None\n\n # get list of commits pages from all branches in repository\n for branch in branches:\n list_of_branch_commits = self.get_all_commits_by_branch(branch['name'])\n\n if list_of_branch_commits is None:\n return None\n\n # adds key 'branches' with branch name in list to every commit in branch,\n # or if key 'branches' is existing add branch name to existing branches list\n for commit_in_branch in list_of_branch_commits:\n commit = repo_commits.get(commit_in_branch['hash'])\n if commit:\n commit['branches'].append(branch['name'])\n else:\n commit_in_branch['branches'] = [branch['name']]\n repo_commits[commit_in_branch['hash']] = commit_in_branch\n\n # add metadata to method response for further updates by get_updated_all_commits\n metadata[branch['name']] = list_of_branch_commits[0]\n\n list_of_branch_commits.clear()\n\n # sorts all commits in repository by date in reverse order\n sorted_commits = sorted(list(repo_commits.values()), key=lambda x: x['date'], reverse=True)\n\n return {'data': sorted_commits, 'metadata': metadata}",
"def test_sort_bzr_latest(self):\n identifiers = [\"master\", \"1.0\", \"2.0\", \"1.1\", \"1.9\", \"1.10\"]\n self.project.repo_type = REPO_TYPE_BZR\n self.project.save()\n self.project.versions.get(slug=LATEST).delete()\n\n for identifier in identifiers:\n get(\n Version,\n project=self.project,\n type=BRANCH,\n identifier=identifier,\n verbose_name=identifier,\n slug=identifier,\n )\n\n versions = list(Version.objects.filter(project=self.project))\n self.assertEqual(\n [\"2.0\", \"1.10\", \"1.9\", \"1.1\", \"1.0\", \"master\"],\n [v.slug for v in sort_version_aware(versions)],\n )",
"def get_branches(self, repo_name, cred_hash):\n branch_names = []\n\n url = f'{self.code_cloud_api.branch_api}/{repo_name}/branches?start=0&limit=30'\n response = self.code_cloud_api.get(url=url, cred_hash=cred_hash)\n if not response['status']:\n return response\n \n for item in response.get('data', {}).get('values', {}):\n branch_names.append(item.get('displayId', ''))\n\n return {'status': True, 'data': branch_names}",
"def branches_full(config, args):\n for b in config.repo.branches():\n yield config.repo.branch(b.name)",
"def checkout_branches(args):\n\n ensure_tracking_branches([])\n if check_dirty([]) and '-f' not in args:\n raise Exception(\"Cannot checkout new branches with dirty projects.\")\n \n man = load_manifest()\n for (name, project) in man.projects.iteritems():\n print >>sys.stderr, \"Checking out tracking branch in project: %s\" % name\n repo = GitRepo(workdir_for_project(project))\n # Check that sucker out\n repo.check_command([\"checkout\", project.tracking_branch])",
"def reap_broken_repos():\n token = os.environ.get('GITHUB_TOKEN', None)\n if token is None:\n print('MISSING GITHUB_TOKEN')\n return\n\n # Do graphql nonsense\n query = '''\n query{\n organization(login:\"os3224\"){\n repositories(first:100,orderBy:{field:CREATED_AT,direction:DESC}){\n nodes{\n ref(qualifiedName:\"master\") {\n target {\n ... on Commit {\n history(first: 20) {\n edges { node { oid } }\n }\n }\n }\n }\n name \n url\n }\n }\n }\n }\n '''\n url = 'https://api.github.com/graphql'\n json = {'query': query}\n headers = {'Authorization': 'token %s' % token}\n\n # Make the graph request over http\n try:\n r = requests.post(url=url, json=json, headers=headers)\n data = r.json()['data']\n organization = data['organization']\n repositories = organization['repositories']['nodes']\n except Exception as e:\n print(traceback.format_exc())\n print(f'Request to github api Failed {e}')\n return\n\n # Running map of unique_code -> assignment objects\n assignments = dict()\n\n # Parse out repo name and url from graphql response\n repos = map(lambda node: (node['name'], node['url'], node['ref']), repositories)\n for repo_name, repo_url, ref in repos:\n assignment = None\n\n # Try to get the assignment object from running map\n for code in repo_name.split('-'):\n assignment = assignments.get(code, None)\n\n # If not in the map, then try to get from the database\n if assignment is None:\n assignment = Assignment.query.filter(\n Assignment.unique_code.in_(repo_name.split('-'))\n ).first()\n\n if assignment is not None:\n assignments[assignment.unique_code] = assignment\n\n # If not in database or map, then eject\n if assignment is None:\n print(f'Could not find assignment for {repo_name}')\n continue\n\n # Guess github username, then create the repo if it doesn't yet exist\n user, github_username = guess_github_username(assignment, repo_name)\n repo = check_repo(assignment, repo_url, github_username, user)\n\n if user is None:\n continue\n\n # Check for broken submissions\n submissions = []\n for submission in Submission.query.filter(Submission.assignment_repo_id == repo.id).all():\n if submission is None:\n continue\n if submission.owner_id != user.id:\n print(f'found broken submission {submission.id}')\n submission.owner_id = repo.owner_id\n submissions.append(submission.id)\n db.session.commit()\n for sid in submissions:\n enqueue_autograde_pipeline(sid)\n\n # Check for missing submissions\n for commit in map(lambda x: x['node']['oid'], ref['target']['history']['edges']):\n submission = Submission.query.filter(\n Submission.commit == commit\n ).first()\n if submission is None:\n print(f'found missing submission {github_username} {commit}')\n submission = Submission(\n commit=commit,\n owner=user,\n assignment=assignment,\n repo=repo,\n state=\"Waiting for resources...\",\n )\n db.session.add(submission)\n db.session.commit()\n init_submission(submission)\n enqueue_autograde_pipeline(submission.id)\n\n r = AssignmentRepo.query.filter(AssignmentRepo.repo_url == repo_url).first()\n if r is not None:\n if r.owner_id != user.id:\n print(f'fixing broken repo owner {r.id}')\n r.owner_id = user.id\n submissions = []\n for submission in Submission.query.filter(\n Submission.assignment_repo_id == r.id\n ).all():\n submission.owner_id = user.id\n submissions.append(submission.id)\n\n db.session.commit()\n for sid in submissions:\n enqueue_autograde_pipeline(sid)\n\n if repo:\n print(f'checked repo: {repo_name} {github_username} {user} {repo.id}')",
"def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))",
"def list_all_repos_info():\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(repo_name)\n try:\n nbr_ahead, nbr_behind = _nbr_commits_ahead_and_behind(repo)\n except git.exc.GitCommandError:\n print(f\" {repo.active_branch.name}\")\n except DetachedHeadError:\n print(f\" HEAD --> {repo.head.commit}\")\n else:\n nb_tabul = 3 if len(repo.active_branch.name) < 6 else 2\n tabuls = \"\\t\" * nb_tabul\n print(f\" {repo.active_branch.name}{tabuls}↓ {nbr_behind} ↑ {nbr_ahead}\")\n if repo.index.diff(None):\n print(\" !!! With unstaged changes !!!\")\n if repo.index.diff(\"HEAD\"):\n print(\" !!! With uncommited changes !!!\")",
"def protect_pr_branch_with_tests_if_any_exist(org: Organization, repo: Repository,\n branches: Dict[str, Branch]) -> List[Change[str]]:\n def execute_test_protection(change: Change[str], branch: Branch, existing_checks: Set[str],\n known_status_checks: Set[str], known_checkruns: Set[str]) -> Change[str]:\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n\n print_debug(\"[%s] Changing status checks on branch '%s' to [%s]\" %\n (highlight(repo.name), highlight(branch.name),\n highlight(\", \".join(list(all_known_checks)))))\n try:\n if existing_checks:\n branch.edit_required_status_checks(strict=True, contexts=list(all_known_checks))\n else:\n safe_branch_edit_protection(\n branch,\n strict=True,\n contexts=list(all_known_checks),\n )\n except GithubException as e:\n print_error(\"Can't edit required status checks on repo %s branch %s: %s\" %\n (repo.name, branch.name, str(e)))\n return change.failure()\n return change.success()\n\n prb = get_pr_branch(repo, branches)\n if not prb:\n return []\n\n existing_checks = set() # type: Set[str]\n try:\n rqs = prb.get_required_status_checks()\n except GithubException:\n # the repository has currently no status checks\n pass\n else:\n if len(rqs.contexts) > 0:\n # The repository already has some status checks\n existing_checks = set(rqs.contexts)\n print_debug(\"Branch %s on repo %s already has status checks [%s]\" %\n (highlight(prb.name), highlight(repo.name), highlight(\", \".join(existing_checks))))\n\n # the repository currently has no status checks, let's see if any came in within the last 7 days\n sevendaysago = datetime.now() - timedelta(days=7)\n commits = repo.get_commits(prb.name, since=sevendaysago)\n known_status_checks = set() # type: Set[str]\n known_checkruns = set() # type: Set[str]\n for commit in commits:\n for status in commit.get_statuses(): # type: CommitStatus\n if status.context not in known_status_checks:\n print_debug(\"New status check [%s]: %s %s '%s'\" %\n (commit.sha, status.updated_at,\n status.context, status.description))\n known_status_checks.add(status.context)\n for checkrun in commit.get_check_runs(): # type: CheckRun\n if checkrun.name not in known_checkruns:\n print_debug(\"New check run [%s]: %s %s %s\" %\n (commit.sha, checkrun.completed_at, checkrun.name, checkrun.app))\n known_checkruns.add(checkrun.name)\n\n all_known_checks = known_status_checks | known_checkruns # For convenience later to treat them as a single set\n print_debug(\"Found status checks [%s]\" % \", \".join(all_known_checks))\n\n if all_known_checks and all_known_checks != existing_checks:\n # add all known checks as required checks\n print_debug('Adding checks [%s] to branch %s on repo %s' %\n (highlight(\", \".join((all_known_checks) - existing_checks)),\n highlight(prb.name), highlight(repo.name)))\n return [Change(\n meta=ChangeMetadata(\n executor=execute_test_protection,\n params=[prb, existing_checks, known_status_checks, known_checkruns]\n ),\n action=ChangeActions.REPLACE if existing_checks else ChangeActions.ADD,\n before=\"%s checks\" % len(existing_checks) if existing_checks else \"No checks\",\n after=\"%s checks\" % len(all_known_checks),\n )]\n return []",
"def list_all_branches(self) -> dict:\n try:\n branches_response = self.repo.get_branches()\n branches_list = []\n for branch in branches_response:\n branches_list.append(branch.raw_data.get('name'))\n return make_success_response(200, branches_list)\n except GithubException as github_exc:\n return make_error_response(github_exc.status, github_exc.data)",
"def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")",
"def repos(request):\n # Clean up garbage created by buggy edits\n bad_branch_keys = models.Branch.query(models.Branch.owner == None).fetch(\n 100, keys_only=True)\n if bad_branch_keys:\n ndb.delete_multi(bad_branch_keys)\n repo_map = {}\n for repo in models.Repository.query().fetch(1000, batch_size=100):\n repo_map[repo.key] = repo\n branches = []\n for branch in models.Branch.query().fetch(2000, batch_size=100):\n repo_key = branch.repo_key\n if repo_key in repo_map:\n branch.repository = repo_map[repo_key]\n branches.append(branch)\n branches.sort(key=lambda b: map(\n unicode.lower, (b.repository.name, b.category, b.name)))\n return respond(request, 'repos.html', {'branches': branches})",
"def _computeobsoleteset(repo):\n obs = set()\n getrev = repo.changelog.nodemap.get\n getphase = repo._phasecache.phase\n for node in repo.obsstore.successors:\n rev = getrev(node)\n if rev is not None and getphase(repo, rev):\n obs.add(rev)\n return obs",
"def new_commits(repo, sha):\n from datetime import datetime\n\n dateformat = \"%a, %d %b %Y %H:%M:%S GMT\"\n release_commit = repo.get_commit(sha)\n since = datetime.strptime(release_commit.last_modified, dateformat)\n commits = repo.get_commits(since=since)\n if len(list(commits)) == 1:\n return False\n return reversed(list(commits)[:-1])",
"def get_changes(access_token, organization_url, target_repo_name, source_branches, target_branch_name, pull_quantity, ignore_words=[]) -> dict:\n print('\\nConnecting to API\\n')\n try:\n # Create a connection to the org\n credentials = BasicAuthentication('', access_token)\n connection = Connection(base_url=organization_url, creds=credentials)\n\n # Get git Client\n # See azure.devops.v5_0.models for models\n # azure.devops.git.git_client_base for git_client methods\n git_client = connection.clients.get_git_client()\n\n # Get the repo\n repositories = git_client.get_repositories()\n\n except MSExceptions.ClientRequestError as err:\n print('Client Request Error:', str(err))\n return None\n except MSExceptions.AuthenticationError as err:\n print('Authentication Error: ', str(err))\n\n target_repo = None\n for repo in repositories:\n if repo.name == target_repo_name:\n target_repo = repo\n\n if not target_repo:\n print(f'Repository {target_repo_name} not found.')\n return None\n\n all_changes = {}\n\n ignored_commits = []\n processed_commits = []\n\n for branch in source_branches:\n\n # Find commits for the specific branch combination\n search_criteria = GitPullRequestSearchCriteria (\n source_ref_name = f'refs/heads/{branch}',\n target_ref_name = f'refs/heads/{target_branch_name}',\n status = 'Completed'\n )\n\n pull_requests = git_client.get_pull_requests(target_repo.id, search_criteria, top=9999)\n\n\n print(f\"Proccesing PR commits for {branch}...\")\n with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:\n future_prs = { executor.submit(process_pull_requests, git_client, target_repo.id, pull, ignore_words): pull for pull in pull_requests}\n for future in tqdm(futures.as_completed(future_prs), unit=' PRs'):\n data, processed, ignored = future.result()\n for change in data.keys():\n if all_changes.get(change):\n all_changes[change] = all_changes[change] + data[change]\n else:\n all_changes[change] = data[change]\n for commit in processed:\n processed_commits.append(commit)\n for commit in ignored:\n ignored_commits.append(commit)\n print()\n\n return all_changes, processed_commits, ignored_commits",
"def base_branches() -> list[str]:\n branches = []\n\n default = sh(\"git rev-parse --abbrev-ref origin/HEAD\").removeprefix(\"origin/\")\n branches.append(default)\n\n releases = sh(\n \"git branch --all --sort=-committerdate --list *release/* | head -10\"\n ).splitlines()\n releases = [b.removeprefix(\"*\").strip() for b in releases]\n branches.extend(releases)\n\n return branches",
"def load_commits(db, repo_name):\n\n SEP = \"-=:=-=:=-=:=-=:=-=:=-=:=-=:=-\"\n GITLOG = f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n SHORT_LINES = 5\n\n # $ git log --format=\"format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b\"\n # ---------------------\n # date: 2021-04-21T16:13:23-04:00\n # hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6\n # auth: [email protected]\n # name: Julia Eskew\n # subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)\n # Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.\n # ---------------------\n # date: 2021-04-15T21:36:47-04:00\n # hash: a1fe3d58dc112bd975f1237baaee787ba22929f1\n # auth: [email protected]\n # name: Albert (AJ) St. Aubin\n # subj: [bug] Corrected issue where program dash showed incorrect completed count\n # [MICROBA-1163]\n # \n # This change will correct an issue in the Program Dashboard where a user\n # would see a course as completed, but not see their Certificate because\n # it was not available to them yet.\n # ---------------------\n\n with db:\n commit_table = db[\"commits\"]\n\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + \"\\n\")):\n if commit:\n lines = commit.split(\"\\n\", maxsplit=SHORT_LINES)\n row = {\"repo\": repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(\": \", maxsplit=1)\n row[key] = val\n row[\"body\"] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)",
"def test_get_latest_results_for_repo(self):\n for x in xrange(98, 103):\n self.db.insert_single_result(generate_mock_result(project='TEST', repository='test-repo', run_id=x))\n self.db.insert_single_result(generate_mock_result(project='NEWTEST', repository='newtest-repo', run_id=x))\n testlatest = self.db.get_latest_results_for_project('TEST')\n self.assertEqual(len(testlatest), 1)\n self.assertEqual(testlatest[0].repository, 'test-repo')\n self.assertEqual(testlatest[0].run_id, '102')",
"def make_branches(self, api_json=None):\n if api_json is None:\n return []\n\n obj = simplejson.loads(api_json)\n branches = [item[\"commit\"][\"sha\"] for item in obj]\n\n print branches\n\n return branches",
"def branches(self):\n return sorted([\n br[20:] for br in self.repo.refs.keys() if (\n br.startswith('refs/remotes/origin/') and\n br[20:] != 'HEAD'\n )\n ])",
"def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes <to branch> on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"<%(branch)s> %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\""
] | [
"0.71542406",
"0.57429",
"0.5712632",
"0.5642177",
"0.55806303",
"0.5520008",
"0.54752177",
"0.54629207",
"0.5433732",
"0.5424115",
"0.53311217",
"0.5299445",
"0.5283825",
"0.52417576",
"0.5239561",
"0.5191586",
"0.5190831",
"0.51559097",
"0.5126341",
"0.512589",
"0.50958616",
"0.506375",
"0.5058959",
"0.4998779",
"0.4994151",
"0.49883273",
"0.4988065",
"0.4966503",
"0.49611512",
"0.4944596"
] | 0.72896576 | 0 |
Reset this node's (and its children's) state to ready | def reset(self):
self.state = EvaluationState.ready
for child in self.children:
if hasattr(child, "reset"):
child.reset() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n for c in self.children:\n c.reset()\n self.marked = False",
"def reset_tree(self):\n self.root = None\n self.action = None\n self.dist_probability = None",
"def reset(self):\n self.children.clear()",
"def reset(self):\r\n self.key = None\r\n self.value = None\r\n self.parent = None\r\n self.left_child = None\r\n self.right_child = None\r\n self.color = BLACK\r\n self.size_tree = 0",
"def _re_init(self):\n self._child_index = 0",
"def _clear_node(self):\n self._element = None\n self._parent = None\n self._leftchild = None\n self._rightchild = None\n self._height = None",
"def reset(self) -> None:\r\n self.tree.delete(*self.tree.get_children())",
"def reset(self):\r\n self.tree = KDTree()\r\n self.paint()",
"def _reset_traversal_state(self):\n for n in self.nodes.values():\n n.reset_traversal_state()",
"def reset(self):\n self.state.fill(EMPTY)",
"def updateTree(self):\n self.reset()\n self.resetTree() \n self.read()",
"def reset_state(self):\n self._events = None # We'll have to grab the event handlers again in case they changed.\n self._elements.clear() # Clear any cached elements in case they changed or disappeared.",
"def clear(self):\n self.tree = Tree()",
"def reset(self):\n for parent in self.GetParents():\n parent.reset()",
"def _reset_cache(self):\n self._cache = None\n for child in self.children: # pylint: disable=E1101\n child._reset_cache()",
"def clear(self):\n self.root = None",
"def reset(self):\n self._set_init()",
"def reset_states(self) -> None:\n self._metric.reset_states()\n # for each child log\n for child in self.children_real_fake:\n child[0].reset_states()\n child[1].reset_states()",
"def reset(self):\n SGMLParser.reset(self)\n self.__depth = 0\n self.__inobject = False\n self.__param = {}\n\n # this a critical data structure,\n self.__nodeTree = [[], ]",
"def reset(self):\n self.set_state(self._initial_state)",
"def clear(self) -> None:\n self.node.prev = self.node.next = self.node",
"def reset_state(self):\n for row in range(len(self.state)):\n for column in range(len(self.state[row])):\n self.state[row][column] = None",
"def clear(self):\n while len(self.nodes) > 0:\n self.nodes[0].remove()\n\n self.has_been_modified = False",
"def reset_graph(self):\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)",
"def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)",
"def restore(self):\n self.nodes.restore()",
"def reset(self):\n self.__init__()",
"def reset(self):\n self.__init__()",
"def reset(self):\n self.__init__()",
"def on_ResetNode_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError"
] | [
"0.7023482",
"0.6967508",
"0.6964375",
"0.670918",
"0.67040575",
"0.6657348",
"0.65749437",
"0.65516037",
"0.65334755",
"0.64151716",
"0.6284906",
"0.62482226",
"0.62395614",
"0.61377215",
"0.61206025",
"0.6064591",
"0.604821",
"0.6045733",
"0.60326046",
"0.6019434",
"0.5980827",
"0.59577423",
"0.5953604",
"0.59498775",
"0.5944229",
"0.5898186",
"0.5897568",
"0.5897568",
"0.5897568",
"0.5891227"
] | 0.75797325 | 0 |
Evaluates the node's (and its children's) state. Returns success if any node succeeds, else failure. | def evaluate(self, blackboard):
success = EvaluationState.success
for child in self.children:
state = child.__call__(blackboard)
if state == success:
return success
return EvaluationState.failure | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self, blackboard):\n success = EvaluationState.success\n\n state = success\n for child in self.children:\n state = child.__call__(blackboard)\n\n if state != success:\n break\n\n return state",
"def evaluate(self, tree):\n\t\tpass",
"def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'",
"def evaluate(self):\n return self._evaluate_recur(self.root())",
"def evaluate(self):\n return self._evaluate_recur(self.root())",
"def evaluate(self) :\n for inp in self.inStates :\n if inp.getState() == 1 : return 1\n return 0",
"def evaluate(self, state):\n\n if self.is_coords and len(state) != len(self.coords):\n raise Exception(\"\"\"state must have the same length as coords.\"\"\")\n\n if not len(state) == len(set(state)):\n raise Exception(\"\"\"Each node must appear exactly once in state.\"\"\")\n\n if min(state) < 0:\n raise Exception(\"\"\"All elements of state must be non-negative\"\"\"\n + \"\"\" integers.\"\"\")\n\n if max(state) >= len(state):\n raise Exception(\"\"\"All elements of state must be less than\"\"\"\n + \"\"\" len(state).\"\"\")\n\n return self.calculate_fitness(state)",
"def evaluate(self) :\n for inp in self.inStates :\n if inp.getState() == 0 : return 0\n return 1",
"def evaluate(self) :\n if self.inStates[0].getState() == self.inStates[1].getState(): return 0\n return 1",
"def evaluate(self, state):\n abstract",
"def test_get_node_state(self):\n pass",
"def testTree(self, valid):\n return testTreeF(self, valid)",
"def _isthisapropertree(self):\n ok = True\n if self._leftchild:\n if self._leftchild._parent != self:\n ok = False\n if self._leftchild._isthisapropertree() == False:\n ok = False\n if self._rightchild:\n if self._rightchild._parent != self:\n ok = False\n if self._rightchild._isthisapropertree() == False:\n ok = False\n if self._parent:\n if (self._parent._leftchild != self\n and self._parent._rightchild != self):\n ok = False\n return ok",
"def test_ChangeValueTree():\n Tree = graph.oval_graph.OvalNode(1, 'operator', 'and', False, [\n graph.oval_graph.OvalNode(2, 'value', \"true\", False),\n graph.oval_graph.OvalNode(3, 'value', \"false\", False),\n graph.oval_graph.OvalNode(4, 'operator', 'or', False, [\n graph.oval_graph.OvalNode(5, 'value', \"false\", False),\n graph.oval_graph.OvalNode(6, 'value', \"true\", False)\n ]\n )\n ]\n )\n\n Tree.change_tree_value(3, \"true\")\n tests.any_test_help.any_test_treeEvaluation_with_tree(Tree, \"true\")",
"def evaluate(self):\n eval_list = nx.topological_sort(self.graph)\n for n in eval_list:\n n.evaluate()\n print(\"evaluating type\", type(n))\n\n # Notify observers of finished calculation\n self.notifyObservers(\"EVALUATION DONE\")\n return \"FINISHED\"",
"def eval_tree(tree: GPTree, dataset: Iterable) -> list:\n results = []\n for data in zip(*dataset):\n try:\n output = tree.compute_tree(data[0])\n results.append(\n 0 if output == data[1] else 1\n ) # right or wrong, but no error.\n except Exception:\n results.append(2) # Fails to run.\n\n return results",
"def __call__(self, node):\n return True;\n predcount = self.CountPred(node);\n if predcount == 0: return True;\n return len(node.predicates) != 0;",
"def evaluate_node(self):\n # p, v = np.random.random(225).astype(np.float16), np.random.random()\n socket = zmq.Context().socket(zmq.DEALER)\n socket.setsockopt_string(zmq.IDENTITY, self.player_id)\n socket.connect('ipc://./tmp/oracle_%s' % self.tree.model_name)\n print('start to evaluate', self.tree.model_name)\n while True:\n # print(self.tree.to_evaluate.qsize())\n batch = []\n states = []\n colors = []\n size = self.tree.to_evaluate.qsize()\n if size > config.INFERENCE_BATCHSIZE:\n size = config.INFERENCE_BATCHSIZE\n elif size == 0:\n time.sleep(0.001)\n continue\n for _ in range(size):\n t, black, white = self.tree.to_evaluate.get()\n mine, yours = posswap(t, black, white)\n batch.append((str(mine), str(yours), t % 2))\n states.append((black, white))\n colors.append(t % 2)\n socket.send(msgpack.dumps((batch, self.player_id)))\n result = msgpack.loads(socket.recv())\n assert len(states) == len(result[0])\n assert len(states) == len(result[1])\n for ind, state in enumerate(states):\n with self.lock:\n self.tree.nodes[state].p = result[0][ind]\n if colors[ind] == 0:\n self.tree.nodes[state].v = result[1][ind]\n else:\n self.tree.nodes[state].v = -result[1][ind]\n self.tree.nodes[state].updated = True",
"def check(self, node):\n # do the necessary setup/arguments and call self.visit (node, args)\n self.visit(node, defined=set())",
"def _isthisapropertree(self):\n ok = True\n if self._leftchild is not None:\n if self._leftchild._parent != self:\n ok = False\n if self._leftchild._isthisapropertree() is False:\n ok = False\n if self._rightchild is not None:\n if self._rightchild._parent != self:\n ok = False\n if self._rightchild._isthisapropertree() is False:\n ok = False\n if self._parent is not None:\n if self not in (self._parent._leftchild, self._parent._rightchild):\n ok = False\n return ok",
"def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()",
"def eval_logic(self, checkDict):\n result = True\n #gets individual evaluations from children\n passList = []\n for child in self.children:\n myVal = child.eval_comparison(checkDict)\n passList.append(child.eval_comparison(checkDict))\n\n #if only one child returns the only boolean available\n if(len(passList) == 1):\n result = passList[0]\n\n #TODO: Combine following cases possibly\n #print(passList)\n #gets resutl if only 2 simple logics\n elif(len(passList) == 2 and len(self.operators) == 1):\n\n result = self.operators[0](passList[0], passList[1])\n else:\n #combines all children logic using the operators\n firstCheck = True\n opIndex = 0\n for i in range(0,len(passList)):\n if(firstCheck):\n firstCheck = False\n result = self.operators[opIndex](passList[0], passList[1])\n i+=1\n else:\n result = self.operators[opIndex](result,passList[i])\n opIndex += 1\n \"\"\"\n print('----------------------')\n print(result)\n \"\"\"\n return result",
"def goalTest(node, goal):\r\n if node.state == goal:\r\n return node",
"def validate(self, node):",
"def performBacktrackSearch(self, rootNode, node):\r\n \r\n print (\"-- proc --\", node.state.assignment)\r\n \r\n #check if we have reached goal state\r\n if node.state.checkGoalState():\r\n print (\"reached goal state\")\r\n return True\r\n \r\n else:\r\n \r\n #check if there is a case of early failure\r\n #if node.state.forwardCheck(): \r\n if node.state.arcConsistency():\r\n \r\n #find an unassigned variable \r\n variable = node.state.selectUnassignedVariable()\r\n \r\n #for all values in the domain\r\n for value in node.state.orderDomainValues():\r\n \r\n #check if constraints are satisfied\r\n if CSP.checkConstraints(node.state.assignment,\r\n variable, value):\r\n \r\n #create child node\r\n childNode = Node(State(node.state.assignment, \r\n node.state.possibleValues, variable, value))\r\n \r\n node.addChild(childNode)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, childNode)\r\n \r\n result = self.performBacktrackSearch(rootNode, childNode)\r\n if result == True:\r\n return True\r\n return False",
"def evalBoolean(tree):\n # check if children the children is a \"or\" or a \"and\" tokken\n if (tree.children[0].data == \"or\"):\n return evalBoolean(tree.children[0].children[0]) or evalBoolean(tree.children[0].children[1])\n if (tree.children[0].data) == \"and\":\n return evalBoolean(tree.children[0].children[0]) and evalBoolean(tree.children[0].children[1])\n \n # set var1\n if(tree.children[0].data == \"integer\"):\n var1 = evalInteger(tree.children[0])\n elif(tree.children[0].data == \"variable\"):\n var1 = getValue(tree.children[0].children[0].value)\n\n # set var2\n if(tree.children[2].data == \"integer\"):\n var2 = evalInteger(tree.children[2])\n elif(tree.children[2].data == \"variable\"):\n var2 = getValue(tree.children[2].children[0].value)\n\n if(tree.children[1].children[0].data == \"greater\"):\n return var1 > var2\n if(tree.children[1].children[0].data == \"less\"):\n return var1 < var2\n if(tree.children[1].children[0].data == \"equals\"):\n return var1 == var2\n if(tree.children[1].children[0].data == \"nequal\"):\n return var1 != var2\n\n print(\"ERROR : UNEXPECTED TOKKEN\")\n return False",
"def evaluate(self, payload, level=0, verbose=True):\n\n # if children are joined by AND, evaluate every child until all children\n # are evaluated or until a False breaks the loop (Need all True for AND)\n if self.conjunction_ in ['AND', 'NAND']:\n result = True\n i = 0\n while result and (i < len(self.children_)):\n \n if verbose:\n tabs = \"\\t\" * level\n if i > 0: print(\"\\n\" + tabs + f\"{self.conjunction_} \\n\")\n child_print = 'Composite' if isinstance(self.children_[i], Composite) else 'Evaluation'\n print(tabs + f\"Evaluating Child {i + 1}, Level {level + 1} - {child_print}\")\n \n result = self.children_[i].evaluate(payload, level + 1, verbose=verbose)\n i += 1\n if self.conjunction_ == 'NAND':\n result = not result\n\n\n # if children are joined by OR, evaluate every child until all children\n # are evaluated or until a True breaks the loop (only need 1 True for OR)\n elif self.conjunction_ in ['OR', 'NOR']:\n result = False\n i = 0\n while result == False and (i < len(self.children_)):\n \n if verbose:\n tabs = \"\\t\" * level\n if i > 0: print(\"\\n\" + tabs + f\"{self.conjunction_} \\n\")\n child_print = 'Composite' if isinstance(self.children_[i], Composite) else 'Evaluation'\n print(tabs + f\"Evaluating Child {i + 1}, Level {level + 1} - {child_print}\")\n \n result = self.children_[i].evaluate(payload, level + 1, verbose=verbose)\n i += 1\n if self.conjunction_ == 'NOR':\n result = not result\n\n # XOR evaluation - 1 and only 1 can be True. Have to iterate over all children unless the number of trues becomes greater than 1\n else:\n i = 0\n true_count = 0\n while true_count < 2 and (i < len(self.children_)):\n if verbose:\n tabs = \"\\t\" * level\n if i > 0: print(\"\\n\" + tabs + f\"{self.conjunction_} \\n\")\n child_print = 'Composite' if isinstance(self.children_[i], Composite) else 'Evaluation'\n print(tabs + f\"Evaluating Child {i + 1}, Level {level + 1} - {child_print}\")\n \n # += a boolean is equivalent to += 1 for T and += 0 for False\n true_count += self.children_[i].evaluate(payload, level + 1, verbose=verbose)\n i += 1\n\n if true_count == 1:\n result = True\n else:\n result = False\n\n if verbose: \n tabs = \"\\t\" * level\n print(\"\\n\" + tabs + f\"Composite Result: {result}\")\n\n return result",
"def valid_tree(phi):\n q = deque([phi]) # queue of nodes to check\n visited = list() # already checked\n\n # save the indim of the root node, and make sure all the indims\n # of the children are the same\n indim = phi.indim\n retval = True\n varsfound = 0\n\n while len(q) > 0:\n # node to check\n node = q.popleft()\n\n # check outdim\n if isinstance(node, amnet.Variable):\n retval &= (node.outdim == node.indim)\n varsfound += 1\n elif isinstance(node, amnet.Linear):\n m, n = node.w.shape\n retval &= (node.outdim == m)\n retval &= (node.x.outdim == n)\n retval &= (all([bi == 0 for bi in node.b])) # check value\n elif isinstance(node, amnet.Constant):\n retval &= (node.outdim == len(node.b))\n retval &= (all([wij == 0 for wij in np.nditer(node.w)])) # check value\n elif isinstance(node, amnet.Affine):\n m, n = node.w.shape\n retval &= (node.outdim == m)\n retval &= (node.x.outdim == n)\n retval &= (m == len(node.b))\n elif isinstance(node, amnet.Mu):\n retval &= (node.outdim == node.x.outdim)\n retval &= (node.outdim == node.y.outdim)\n retval &= (node.z.outdim == 1)\n elif isinstance(node, amnet.Stack):\n retval &= (node.outdim == node.x.outdim + node.y.outdim)\n else:\n retval = False # unknown node type\n\n # check indim\n retval &= (node.indim == indim)\n\n # short-circuit if an inconsistency has been found\n if not retval:\n return False\n\n # add children to queue\n if not(any(node is e for e in visited)):\n visited.append(node)\n #q.extend(children(node))\n q.extend([c for c in children(node) if c not in visited])\n\n # finished iterating\n # TODO: also check if graph is cyclic\n return (varsfound == 1)",
"def hasChildren():",
"def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ..."
] | [
"0.71742636",
"0.6537723",
"0.6279385",
"0.6106426",
"0.6106426",
"0.60549605",
"0.60402316",
"0.60212874",
"0.5946995",
"0.58815885",
"0.58076704",
"0.5800487",
"0.57829833",
"0.5762511",
"0.56892544",
"0.56854844",
"0.56838524",
"0.5680494",
"0.56597567",
"0.56062233",
"0.5581533",
"0.55706203",
"0.55386907",
"0.55359536",
"0.5529258",
"0.5495778",
"0.54765767",
"0.54727125",
"0.54070693",
"0.5397771"
] | 0.6946291 | 1 |
imports 'catalog', and creates a pandas.DataFrame containing the columns specified in 'params'. 'catalog' is expected to be in the .csv format. | def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):
print "Loading %s and creating DataFrame.." % catalog
df_imported = pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)
print "..Done\n----------"
return df_imported | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_catalog(self):\n self.catalog = pd.read_csv(self.catalog_path, \n index_col=0, parse_dates=True)\n self.unique_years = self.catalog.index.year.unique()\n return",
"def loadData(catalog):\n return controller.loadData(catalog)",
"def loadData(catalog):\n return controller.loadData(catalog)",
"def loadData(catalog):\n controller.loadData(catalog)",
"def loadData(catalog):\n controller.loadData(catalog)",
"def loadData(catalog):\n controller.loadData(catalog)",
"def loadData(catalog):\n controller.loadData(catalog)",
"def loadData(catalog):\n controller.loadData(catalog)",
"def loadData(catalog):\r\n controller.loadData(catalog)",
"def load_data(catalog):\n controller.load_data(catalog)",
"def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n columns = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator', 'year_quarter'\n ]\n dtypes = {\n 'loan_id': np.int64,\n 'orig_channel': CategoricalDtype(['B', 'C', 'R']),\n 'seller_name': str,\n 'orig_interest_rate': np.float64,\n 'orig_upb': np.int64,\n 'orig_loan_term': np.int64,\n 'orig_date': str,\n 'first_pay_date': str,\n 'orig_ltv': np.float64,\n 'orig_cltv': np.float64,\n 'num_borrowers': np.float64,\n 'dti': np.float64,\n 'borrower_credit_score': np.float64,\n 'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),\n 'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),\n 'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),\n 'num_units': np.int64,\n 'occupancy_status': CategoricalDtype(['I', 'P', 'S']),\n 'property_state': CategoricalDtype(\n ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',\n 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',\n 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',\n 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',\n 'VT', 'WA', 'WI', 'WV', 'WY']),\n 'zip': np.int64,\n 'mortgage_insurance_percent': np.float64,\n 'product_type': CategoricalDtype(['FRM']),\n 'coborrow_credit_score': np.float64,\n 'mortgage_insurance_type': np.float64,\n 'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),\n 'year_quarter': np.int64\n }\n\n a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)\n return a",
"def pd_load_acquisition_csv(acquisition_path, **kwargs):\n\n cols = [\n 'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',\n 'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',\n 'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',\n 'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator'\n ]\n\n dtypes = {\n \"loan_id\": np.int64,\n \"monthly_reporting_period\": str,\n \"servicer\": str,\n \"interest_rate\": np.float64,\n \"current_actual_upb\": np.float64,\n \"loan_age\": np.float64,\n \"remaining_months_to_legal_maturity\": np.float64,\n \"adj_remaining_months_to_maturity\": np.float64,\n \"maturity_date\": str,\n \"msa\": np.float64,\n \"current_loan_delinquency_status\": np.int32,\n \"mod_flag\": CategoricalDtype(['N', 'Y']),\n \"zero_balance_code\": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),\n \"zero_balance_effective_date\": str,\n \"last_paid_installment_date\": str,\n \"foreclosed_after\": str,\n \"disposition_date\": str,\n \"foreclosure_costs\": np.float64,\n \"prop_preservation_and_repair_costs\": np.float64,\n \"asset_recovery_costs\": np.float64,\n \"misc_holding_expenses\": np.float64,\n \"holding_taxes\": np.float64,\n \"net_sale_proceeds\": np.float64,\n \"credit_enhancement_proceeds\": np.float64,\n \"repurchase_make_whole_proceeds\": np.float64,\n \"other_foreclosure_proceeds\": np.float64,\n \"non_interest_bearing_upb\": np.float64,\n \"principal_forgiveness_upb\": np.float64,\n \"repurchase_make_whole_proceeds_flag\": CategoricalDtype(['N', 'Y']),\n \"foreclosure_principal_write_off_amount\": np.float64,\n \"servicing_activity_indicator\": CategoricalDtype(['N', 'Y']),\n }\n print(acquisition_path)\n\n #return pd.read_csv(acquisition_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])\n return pd.read_csv('acq.csv', names=cols, delimiter='|', dtype=dtypes, parse_dates=[6,7])",
"def load_catalog(self, sql_context):\n self.df_catalog = (sql_context.read\n .format('jdbc')\n .options(url=self.catalog_connection,\n dbtable=\"(\" + self.catalog_query + \") as foo\",\n driver=\"org.postgresql.Driver\")\n .load())",
"def create_dataframe():\r\n\r\n df = pd.read_csv('data/data.csv', header=0)\r\n return df",
"def loadData(catalog):\n loadArtworks(catalog)\n loadArtists(catalog)\n loadAdquires(catalog)\n loadNacionalities(catalog)\n load2DArtworks(catalog)\n loadArtistMediumsTags(catalog)\n loadDptments(catalog)\n catalog['artists'] = sortArtists(catalog, 3)\n fillArtistMediums(catalog)\n fillMostUsedMediums(catalog)\n catalog['artists_tags'] = sortArtistTags(catalog, 3)\n sort_dptments(catalog)",
"def load() -> DataFrame:\n return load_file(__file__, \"default.csv.gz\")",
"def get_flat_file_data(kind: str, server: str='PROD', ID: str='42') -> DataFrame:\r\n k = {\r\n 'c': 'customer_data_{0}_{1}_.csv',\r\n 'b': 'vendor_data_{0}_{1}_.csv'\r\n }\r\n f = k[kind].format(server, ID)\r\n df = pd.read_csv(f'{BASE_DIR}/{f}', encoding='UTF-8')\r\n df = prepare_input_df(df)\r\n return df",
"def _csv_engine(filename, node):\n sep = node.get(\"sep\", \",\")\n header = node.get(\"header\", 0)\n logger.debug(\n \"Parsing CSV '{}'. sep={}, header={}.\".format(filename, sep, header)\n )\n index = node.get(\"index\")\n encoding = node.get(\"encoding\")\n if not index:\n raise InvalidConfig(\"An 'index' column is required. It should \"\n \"be the sample id column.\")\n\n df = pd.read_csv(filename, sep=sep, header=header, encoding=encoding)\n df.set_index(index, verify_integrity=True, inplace=True, drop=True)\n df.index = df.index.astype(str)\n\n return df",
"def create_catalog_dataframe(save_dataframe):\n if not save_dataframe:\n return pd.read_pickle(f'{path_dictionary[\"catalog_dataframe_grouped_path\"]}')\n\n catalog_items = DbHelper.get_all_catalog_items()\n\n # Create dataframe to put all information together\n columns = ['user_id', 'item_id', 'session_id', 'window_size_x', 'window_size_y', 'page_size_x', 'page_size_y', 'catalog_item_list', 'user_log_list']\n catalog_items_df = pd.DataFrame(catalog_items, columns=columns)\n\n # Clean 'Catalog Items' that user see during a session\n catalog_items_df['catalog_item_list'] = catalog_items_df.apply(clean_objects_listed, axis=1)\n\n # Clean Log Files\n catalog_items_df['user_log_list'] = catalog_items_df.apply(clean_logs, axis=1)\n\n # Get Catalog Items that user hover or has a click action, her/his mouse\n catalog_items_df = get_interacted_catalog_items(catalog_items_df)\n\n # Label the catalog items as 0\n catalog_items_df['catalog_item_list'] = catalog_items_df.apply(label_page_type, axis=1)\n\n catalog_items_df_grouped = catalog_items_df.groupby(['user_id', 'session_id'], as_index=False).agg(lambda x: list(x))\n catalog_items_df_grouped.drop(['item_id', 'window_size_x', 'window_size_y', 'page_size_x', 'page_size_y'], axis=1, inplace=True)\n\n if save_dataframe:\n catalog_items_df.to_pickle(f'{path_dictionary[\"path_raw_catalog_dataframe\"]}')\n catalog_items_df_grouped.to_pickle(f'{path_dictionary[\"path_catalog_dataframe\"]}')\n catalog_items_df_grouped.to_csv(f'{path_dictionary[\"path_catalog_csv\"]}', index=False, sep='|')\n return catalog_items_df_grouped",
"def _read(**kwargs) -> DataFrame:\n Engine.subscribe(_update_engine)\n\n try:\n pd_obj = FactoryDispatcher.read_csv_glob(**kwargs)\n except AttributeError:\n raise AttributeError(\"read_csv_glob() is only implemented for pandas on Ray.\")\n\n # This happens when `read_csv` returns a TextFileReader object for iterating through\n if isinstance(pd_obj, pandas.io.parsers.TextFileReader):\n reader = pd_obj.read\n pd_obj.read = lambda *args, **kwargs: DataFrame(\n query_compiler=reader(*args, **kwargs)\n )\n return pd_obj\n\n return DataFrame(query_compiler=pd_obj)",
"def prepare_data(filename='data/DOT_timeSeries.csv'):\n\n # read data file into pandas dataframe\n df = pd.read_csv(filename)\n\n # extract unwanted 'countries' from dataframe\n countries = ['Europe', 'Emerging and Developing Europe', 'Emerging and Developing Asia',\n 'Middle East, North Africa, and Pakistan', 'Export earnings: nonfuel',\n 'Sub-Saharan Africa', 'Export earnings: fuel', 'Western Hemisphere',\n 'World', 'Special Categories', 'Advanced Economies', 'CIS',\n 'Emerging and Developing Economies']\n for country in countries:\n df = extract_relevant_rows(df, column_name='Country Name', column_value=country, not_equal=True)\n df = extract_relevant_rows(df, column_name='Counterpart Country Name', column_value=country, not_equal=True)\n\n # extract exports only from data\n exports = extract_relevant_rows(df, column_name='Indicator Code', column_value='TXG_FOB_USD')\n # extract value attributes only from exports\n export_values = extract_relevant_rows(exports, column_name='Attribute', column_value='Value')\n\n return export_values",
"def _load(self, config: Dict):\n return pd.read_csv(config['path'])",
"def loadData(catalog):\n loadVideos(catalog)\n loadCategories(catalog)",
"def loader():\n bucket = data_load_variables[\"bucket\"]\n\n if data_load_variables[\"use_lite_dataset\"]:\n dataset_name = data_load_variables[\"lite_dataset_name\"]\n else:\n dataset_name = data_load_variables[\"dataset_name\"]\n\n s3 = boto3.client('s3')\n\n obj = s3.get_object(Bucket=bucket, Key=dataset_name)\n # get object and file (key) from bucket\n\n df = pd.read_csv(obj['Body'])\n return df",
"def createDataFrame(path):\n df = pd.read_csv(path)\n df = df[['planet_name', 'planet_mass', 'orbital_radius', 'host_name', \n 'spectral_type', 'stellar_age', 'stellar_radius', \n 'stellar_mass', 'stellar_temperature', 'stellar_luminosity', \n 'optical_magnitude', 'near_ir_magnitude', \n 'stellar_surface_gravity', 'stellar_metallicity']]\n \n df = df.dropna(subset=['spectral_type'])\n df.spectral_type = df.spectral_type.str[0:1]\n df.spectral_type = df.spectral_type.str.strip()\n classification = np.array(['O','B','A','F','G','K','M'])\n df = df[df.spectral_type.isin(classification)]\n df.insert(4, \"amount_of_planets\", 0)\n df.amount_of_planets = df.groupby('host_name')['host_name'].transform('count')\n \n df.planet_mass = np.log10(df.planet_mass)\n df.orbital_radius = np.log10(df.orbital_radius)\n \n df = df.sort_values(by=['host_name'])\n df = df.reset_index(drop=True) \n \n return df",
"def import_csv_data(cr, registry):\n files = ['data/sc.info.csv']\n for file in files:\n tools.convert_file(cr, 'prospects_app', file, None,\n mode='init', noupdate=True, kind='init')",
"def loadData(catalog):\n\n loadArtwork(catalog)\n loadArtists(catalog)",
"def get_data(params):\n username = params['username']\n provider = params[\"provider\"]\n quality = params[\"quality\"]\n speed = params[\"speed\"]\n vpn = params[\"vpn\"]\n platform = params[\"platform\"]\n clean = params[\"clean\"]\n date = params[\"date\"]\n interface = params[\"interface\"]\n csvmode = params[\"csvmode\"]\n path = params['path']\n output_path = './data/collected/'\n\n output_file = '{}_{}_{}_{}_{}_{}_{}_{}.csv'.format(username, provider, quality, speed, vpn, platform, clean, date)\n command = 'python3.8 {} -i {} -s {} {}'.format(path, interface, csvmode, output_file)\n os.system(command)\n \n return",
"def get_df(\n self,\n response: Response,\n parse_dates: Optional[Union[List[int], bool]] = False,\n sort_values: Optional[List[str]] = None,\n reindex_columns: Optional[List[str]] = None,\n ) -> pd.DataFrame:\n\n with io.BytesIO() as buffer:\n try:\n buffer.write(response.content)\n buffer.seek(0)\n z: zipfile.ZipFile = zipfile.ZipFile(buffer)\n\n except zipfile.BadZipFile as e:\n print(\"Bad zip file\", e)\n\n else: # TODO need to annotate csv\n csv = z.open(z.namelist()[0]) # ignores all but first file in zip\n df: pd.DataFrame = pd.read_csv(csv, parse_dates=parse_dates)\n\n df = df.rename(columns={\"PRC\": \"MW\"})\n\n if sort_values:\n df = df.sort_values(sort_values).reset_index(drop=True)\n\n if reindex_columns:\n df = df.reindex(columns=reindex_columns)\n\n return df",
"def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)"
] | [
"0.6171639",
"0.5896814",
"0.5896814",
"0.58639306",
"0.58639306",
"0.58639306",
"0.58639306",
"0.58639306",
"0.5844626",
"0.57802",
"0.5761707",
"0.5757528",
"0.5681903",
"0.56408125",
"0.5618386",
"0.5584293",
"0.5555248",
"0.5497467",
"0.5468997",
"0.54676163",
"0.54645765",
"0.54604167",
"0.542605",
"0.54117143",
"0.54058087",
"0.53844804",
"0.5375254",
"0.53704023",
"0.5369245",
"0.53685987"
] | 0.75542295 | 0 |
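A minimal, hypothetical usage sketch of the import_data record above (assumes pandas is installed, a local 'xmatch_TGAS_Simbad.csv' exists, and the listed column names are present in that file; the parameter values in the call are illustrative only, not taken from the dataset):

import pandas as pd

def import_data(catalog='xmatch_TGAS_Simbad.csv', params=None, nrows=None, delimiter=','):
    # Load only the requested columns (and, optionally, the first `nrows` rows) into a DataFrame.
    return pd.read_csv(catalog, delimiter=delimiter, header=0, usecols=params, nrows=nrows)

# Hypothetical call: read a subset of astrometric/photometric columns for a quick look.
df = import_data(params=['parallax', 'parallax_error', 'phot_g_mean_mag'], nrows=1000)
print(df.head())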
Open a table fits file and convert it to a pandas dataframe. | def import_fits(fitsfile='tgasptyc.fits'):
if isfile(fitsfile):
print "Opening %s.." % fitsfile
table = Table.read(fitsfile)
pandas_df = table.to_pandas()
else:
print "%s not found. Exiting." % fitsfile
sys.exit()
print "Converting table to pandas_df.."
print "..Done"
return pandas_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def df_from_fits(filename, i=1):\n return pd.DataFrame.from_records(fitsio.FITS(filename)[i].read().byteswap().newbyteorder())",
"def load_fits_table(fname):\n\treturn fits.open(fname)[1].data",
"def load_fits(path: str, ncols: int, nonames: bool) -> DataFrame:\n assert not nonames\n\n from astropy.table import Table\n df = Table.read(path).to_pandas()\n if ncols:\n df = df[df.columns[:ncols]]\n # Need to mask \"special\" -99 values\n df.replace(-99, np.nan, inplace=True)\n df.replace(-99.9, np.nan, inplace=True)\n return df",
"def table_to_dataframe(file):\n columns = ['instrument', 'dataset', 'flowcell', 'well', \n 'well_tile', 'cell', 'blob', 'position_i', 'position_j',\n 'read', 'quality']\n\n columns_drop = ['instrument', 'flowcell', 'dataset', 'well_tile']\n\n df = pd.read_csv(file, sep='\\s+', header=None, quoting=3)\n df.columns = columns\n df['tile'] = df['well_tile'] % 1000\n df = df.drop(columns_drop, axis=1)\n return df",
"def load(file):\n return pq.read_table(file).to_pandas()",
"def collect_data(data_file):\n dat = Table.read(data_file, format='fits')\n df_bytes = dat.to_pandas() # Convert to pandas dataframe\n df = pd.DataFrame() # Init empty dataframe for converted types\n\n # Convert byte columns to strings\n for column in df_bytes:\n if df_bytes[column].dtype == np.dtype('object'):\n df[column + \"_str\"] = df_bytes[column].str.decode(\"utf-8\")\n df[column] = df[column + \"_str\"].copy(deep=True)\n df.drop(column + \"_str\", axis=1, inplace=True)\n else:\n df[column] = df_bytes[column]\n # Drop infinity values.\n df = df[~df.isin([np.inf, -np.inf]).any(1)]\n return df",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def _parse_fits(filepath):\n hdulist = fits.open(filepath)\n header = OrderedDict(hdulist[0].header)\n # For these NoRH files, the time series data is recorded in the primary\n # HDU\n data = hdulist[0].data\n\n # No explicit time array in FITS file, so construct the time array from\n # the FITS header\n obs_start_time=parse_time(header['DATE-OBS'] + 'T' + header['CRVAL1'])\n length = len(data)\n cadence = np.float(header['CDELT1'])\n sec_array = np.linspace(0, length-1, (length/cadence))\n\n norh_time = []\n for s in sec_array:\n norh_time.append(obs_start_time + datetime.timedelta(0,s))\n\n return header, pandas.DataFrame(data, index=norh_time)",
"def read_table(file_name: Union[str, Path], **kwargs):\n\tfile_name = Path(file_name)\n\textension = file_name.suffix\n\tdefault_args = {\n\t\t'.csv': {'delimiter': ','},\n\t\t'.tsv': {'delimiter': '\\t'}\n\t}\n\n\t# arguments = self._cleanArguments(extension, arguments)\n\tfile_name = str(file_name.absolute())\n\tif extension in {'.xls', '.xlsx', '.xlsm'}: # .xlsm is not a typo.\n\n\t\tdf = pandas.read_excel(file_name, **kwargs)\n\telif extension in {'.csv', '.tsv', '.fsv', '.txt'}:\n\t\targuments = {**default_args.get(extension), **kwargs}\n\t\tif 'sheetname' in arguments: arguments.pop('sheetname')\n\t\tdf = pandas.read_table(file_name, **arguments)\n\telif extension == '.pkl':\n\t\tdf = pandas.read_pickle(file_name)\n\telse:\n\t\traise NameError(\"{} does not have a valid extension!\".format(file_name))\n\treturn df",
"def read_pipe_table_to_pandas(filename):\n\n astropy_data = astropy.io.ascii.read(filename)\n data_stream = StringIO()\n astropy_data[2:].write(data_stream, format='ascii.basic', delimiter='|')\n data_stream.seek(0)\n return pandas.read_csv(data_stream,\n comment='#',\n sep='|',\n skipinitialspace=True)",
"def _pdread2astrotable(csvgzdir):\n df = pd.read_csv(csvgzdir)\n tb = Table.from_pandas(df)\n return tb",
"def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df",
"def read_file(fname: str) -> pd.DataFrame:\n raw_data = (\n pd.read_hdf(fname).to_frame().reset_index(level=[0, 1]).loc[ANALYSIS_DATE]\n )\n raw_data[\"date\"] = raw_data.index\n return raw_data",
"def data_from_fits(fits_file):\n hdul = fits.open(fits_file)\n data = hdul[0].data\n return data",
"def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)",
"def collect_data(file = 'osc+otc-Assembled.fits'):\n dat = Table.read(file, format='fits')\n df_bytes = dat.to_pandas() # Convert to pandas dataframe\n df = pd.DataFrame() # Init empty dataframe for converted types\n\n # Convert byte columns to strings\n for column in df_bytes:\n if df_bytes[column].dtype == np.dtype('object'):\n df[column + \"_str\"] = df_bytes[column].str.decode(\"utf-8\")\n df[column] = df[column + \"_str\"].copy()\n df.drop(column + \"_str\", axis = 1, inplace = True)\n else:\n df[column] = df_bytes[column]\n\n # Prints sum of NULL values by column\n # df.isnull().sum().to_csv(output_dir + \"Missing_Values.csv\")\n return df",
"def OSW2df(osw_file, table_name):\n conn = connOSW(osw_file)\n df = pd.read_sql_query(\"SELECT * FROM \" + table_name, conn)\n conn.close()\n return df",
"def run(self) -> pd.DataFrame:\n with open(self.file_path, 'r') as in_file:\n headers = in_file.readline()\n headers = headers.replace(\"\\n\", \"\")\n\n if ',' in headers:\n headers = headers.split(',')\n else:\n headers = headers.split()\n\n if headers == self.NORMAL_HEADERS:\n return self.normal_csv()\n else:\n return self.read_data_columns_to_rows()",
"def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)",
"def load_data(file_name):\n return Orange.data.Table(file_name)",
"def table_to_data_frame(table):\n nr = table.rowCount()\n nc = table.columnCount()\n\n if nr == 0:\n return None\n\n idx_labels = []\n for i in range(nr):\n item = table.verticalHeaderItem(i)\n if item is not None:\n idx_labels.append(item.text().replace(' ', ''))\n else:\n idx_labels.append(i)\n\n col_labels = []\n for i in range(nc):\n item = table.horizontalHeaderItem(i)\n if item is not None:\n col_labels.append(item.text().replace(' ', ''))\n else:\n col_labels.append(i)\n\n tdata = []\n for i in range(nr):\n ldata = []\n for j in range(nc):\n value = table.item(i, j).text()\n if str_is_float(value):\n value = float(value)\n ldata.append(value)\n tdata.append(ldata)\n df = _pd.DataFrame(_np.array(tdata), index=idx_labels, columns=col_labels)\n\n return df",
"def load_raw_table(conf, table):\n confrd = load_config_raw_data(conf)\n path_table = Path(confrd[table][\"path\"])\n sep = confrd[table][\"sep\"]\n encoding = confrd[table][\"encoding\"]\n df = pd.read_csv(path_table, sep=sep, encoding=encoding)\n return df",
"def import_experiments_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)",
"def import_files_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)",
"def load_tsv(path: str, ncols: int, nonames: bool) -> DataFrame:\n cols = range(ncols) if ncols else None\n return pandas.read_csv(path, usecols=cols, sep='\\t', skipinitialspace=True, header='infer' if not nonames else None)",
"def read_bed_file(path, labelnum=0):\n\n bed_df = pd.read_table(path, sep=\"\\t\", header=None)\n colnames = generate_colnames(bed_df, labelnum)\n bed_df.columns = colnames\n print(bed_df.head())\n return bed_df",
"def import_tables(file, pages):\n tables = camelot.read_pdf(\n file, pages=pages,\n flavor='stream',\n )\n return tables",
"def read_df_from_binary(file_name_mask):\n data = read_matrix_from_binary(file_name_mask + '-value.bin')\n with open(file_name_mask + '-name.txt', 'r') as f:\n index = f.readline().strip().split('\\t')\n columns = f.readline().strip().split('\\t')\n return pandas.DataFrame(data=data, index=index, columns=columns)",
"def read(filename, replace_columns=True):\n f = open(filename)\n lines = f.readlines()\n f.close()\n\n # Extract column names from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Columns:'):\n columns = []\n odt_section = i # Should be removed after runs are split.\n for part in re.split('Oxs_|Anv_|Southampton_', line)[1:]:\n for char in [\"{\", \"}\", \" \", \"\\n\"]:\n part = part.replace(char, '')\n if replace_columns:\n if part in columns_dic.keys():\n columns.append(columns_dic[part])\n else:\n msg = \"Entry {} not in lookup table.\".format(part)\n raise ValueError(msg)\n else:\n columns.append(part)\n\n # Extract units from the odt file.\n for i, line in enumerate(lines):\n if line.startswith('# Units:'):\n units = line.split()[2:]\n\n # Extract the data from the odt file.\n data = []\n for i, line in enumerate(lines[odt_section:]):\n if not line.startswith(\"#\"):\n data.append([float(number) for number in line.split()])\n\n df = pd.DataFrame(data, columns=columns)\n # next line is required to allow adding list-like attribute to pandas DataFrame\n # see https://github.com/pandas-dev/pandas/blob/2f9d4fbc7f289a48ed8b29f573675cd2e21b2c89/pandas/core/generic.py#L3631\n df._metadata.append('units')\n df.units = dict(zip(columns, units))\n return df",
"def read_fit_column(file):\n\n # Data was pulled out of an exposure by modifying residual_fringe.py to write out a column of data\n # The function we are testing is fit_1d_background_complex.\n\n file_dir = Path(__file__).parent.resolve()\n file_path = str(file_dir / file)\n\n with fits.open(file_path) as hdu:\n col_data = hdu[1].data\n col_weight = hdu[2].data\n col_wnum = hdu[3].data\n bg_fit = hdu[4].data\n store_freq = hdu[0].header['FFREQ']\n\n return col_data, col_weight, col_wnum, bg_fit, store_freq"
] | [
"0.72136354",
"0.70939803",
"0.6888955",
"0.6693443",
"0.65260655",
"0.64546525",
"0.64119506",
"0.6292741",
"0.60741687",
"0.6048607",
"0.59836",
"0.59633166",
"0.5962561",
"0.5943778",
"0.59254146",
"0.5899131",
"0.5885488",
"0.586403",
"0.57661194",
"0.5747174",
"0.57433605",
"0.57226944",
"0.5703075",
"0.5671623",
"0.5671411",
"0.5609038",
"0.5608039",
"0.5604904",
"0.5603439",
"0.55851346"
] | 0.7552445 | 0 |
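A short sketch of the FITS-to-DataFrame path described in the import_fits record above (assumes astropy and pandas are installed and that a binary-table FITS file such as 'tgasptyc.fits' is available; the filename comes from the record and is not verified here):

from astropy.table import Table

table = Table.read('tgasptyc.fits')   # read the table HDU of the FITS file
tgas_df = table.to_pandas()           # convert the astropy Table to a pandas DataFrame
print(tgas_df.columns.tolist())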
creates a new column 'tycho2_id' in the tycho2 catalog. This is for comparison with the TGAS catalog. | def create_tycho_id(tycho2df):
tycho2df['tycho2_id'] = tycho2df.TYC1.astype(str).str.cat(tycho2df.TYC2.astype(str), sep='-')\
.str.cat(tycho2df.TYC3.astype(str), sep='-')
tycho2df = tycho2df.rename(columns={'HIP': 'hip'})
return tycho2df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_idx2id(self, id2idx = None):\n if id2idx is None:\n return {v:k for k, v in self.id2idx.items()}\n return {v:k for k, v in id2idx.items()}",
"def getMcc2Id(self):\n return self._base.getMcc2Id()",
"def get_col2id( self, ratios_standardized, db ):\n\t\tcol_info_collection = db.col_info\n\t\tcol_name = []\n\t\tcol_id = []\n\t\tfor i in col_info_collection.find():\n\t\t\tcol_name.append(i[\"egrin2_col_name\"])\n\t\t\tcol_id.append(i[\"col_id\"])\n\t\tfor i in ratios_standardized.columns.values:\n\t \t\tif i not in col_name:\n\t \t\t\tcol_name.append( i )\n\t \t\t\tif len(col_id) > 0:\n\t\t\t\t\tcol_id.append(max(col_id)+1)\n\t\t\t\telse:\n\t\t\t\t\tcol_id.append(0)\n\t \tcol_info = pd.DataFrame( zip( col_id, col_name ), index = col_name, columns = [ \"col_id\", \"egrin2_col_name\"] )\n\t \treturn col_info",
"def add_column_into_target_sf(self, tap_type, table, new_column):\n self.run_query_target_snowflake(\n f'ALTER TABLE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table} ADD {new_column[\"name\"]} int'\n )\n self.run_query_target_snowflake(\n f'UPDATE ppw_e2e_tap_{tap_type}{self.sf_schema_postfix}.{table}'\n f' SET {new_column[\"name\"]}={new_column[\"value\"]} WHERE 1=1'\n )",
"def getMcc2Id(self):\n return NotImplementedError",
"def trilha2(self, trilha2):\n self._trilha2 = trilha2",
"def tag_id2(self, tag_id2):\n if self._configuration.client_side_validation and tag_id2 is None:\n raise ValueError(\"Invalid value for `tag_id2`, must not be `None`\") # noqa: E501\n\n self._tag_id2 = tag_id2",
"def getMcc2Id(self):\n return self.mcc2id",
"def getMcc2Id(self):\n return self.mcc2id",
"def getMcc2Id(self):\n return self.mcc2id",
"def getMcc2Id(self):\n return self.mcc2id",
"def _getNewCatId(self):\n\n newCatId = COCO_PLUS.CAT_ID\n COCO_PLUS.CAT_ID += 1\n\n return newCatId",
"def make_category_table_level2(category_level2_table, category_table):\n # Create a dict mapping 'category_level1_names' to 'category_level1_index'\n category_name2label_level2 = {}\n for item_level2 in category_level2_table.itertuples():\n category_name = item_level2[1]\n category_idx = item_level2[2]\n category_name2label_level2[category_name] = category_idx\n # Create a dict mapping 'category_id' to 'category_level1_index'\n category_id2label_level2 = {}\n for item in category_table.itertuples():\n category_id = item[0]\n category_idx = category_name2label_level2[item[2]]\n category_id2label_level2[category_id] = category_idx\n return category_id2label_level2",
"def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )",
"def secondary_id(self, value):\n self._write(MX_SECONDARY_ID, value)",
"def oeid_to_existing_extid(self, cr, uid, referential_id, openerp_id, context=None):\n return self.get_extid(cr, uid, openerp_id, referential_id, context=context)",
"def catalog_id(self):\n return self._catalog_id",
"def create_table(self):\n self.db.query(f\"\"\"\n CREATE TABLE IF NOT EXISTS {self.table} (\n substitut_id bigint unsigned references product(id),\n original_id bigint unsigned references product(id),\n PRIMARY KEY (substitut_id, original_id)\n )\n \"\"\")",
"def newid(self, target_table):\n self.new_id[target_table] += 1\n return self.new_id[target_table]",
"def _output_imei_column(self):\n if self._generate_check_digit:\n imei_col_name = sql.Identifier('imei_norm_with_check_digit')\n else:\n imei_col_name = sql.Identifier('imei_norm')\n return imei_col_name",
"def _get_country_id(self, code2):\n if not hasattr(self, '_country_codes'):\n self._country_codes = {}\n\n if code2 not in self._country_codes.keys():\n self._country_codes[code2] = Country.objects.get(code2=code2).pk\n return self._country_codes[code2]",
"def get_n2(cls):\n return cls.objects.get(pk='N2')",
"def get_catalog_id(self):\n return self._catalog_id",
"def insert_column(self, tb_name, column_name, data_type):\n sentences = f\"\"\"\n ALTER TABLE {tb_name} ADD COLUMN {column_name} {data_type};\n \"\"\"\n print(sentences)\n self.commit(sentences)",
"def table_catalog_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_catalog_id\")",
"def table_catalog_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_catalog_id\")",
"def get_actual_id(translated):",
"def secondary_id(self):\n return self._read(MX_SECONDARY_ID)",
"def catalog_id(self, catalog_id):\n self._catalog_id = catalog_id",
"def tdim2(dim2):\n transformers = [Enumerate(dim2.categories), OneHotEncode(len(dim2.categories))]\n tdim2 = TransformedDimension(Compose(transformers, dim2.type), dim2)\n return tdim2"
] | [
"0.50282025",
"0.50220966",
"0.5005243",
"0.49620757",
"0.49583018",
"0.4915321",
"0.4880197",
"0.4807147",
"0.4807147",
"0.4807147",
"0.4807147",
"0.47635424",
"0.47528297",
"0.46979147",
"0.4659034",
"0.4647828",
"0.45996457",
"0.45982006",
"0.4572538",
"0.456453",
"0.4558992",
"0.45358315",
"0.45353693",
"0.45138946",
"0.45115072",
"0.4509936",
"0.4496806",
"0.44886637",
"0.44786626",
"0.44775993"
] | 0.7335801 | 0 |
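A toy illustration of the 'tycho2_id' construction in the create_tycho_id record above (the TYC values are made up; it only shows that concatenating TYC1, TYC2 and TYC3 with '-' yields identifiers such as '1-8-1', matching the Tycho-2 naming used for the TGAS cross-match):

import pandas as pd

tycho2df = pd.DataFrame({'TYC1': [1, 2], 'TYC2': [8, 13], 'TYC3': [1, 1], 'HIP': [100, 101]})
tycho2df['tycho2_id'] = (tycho2df.TYC1.astype(str)
                         .str.cat(tycho2df.TYC2.astype(str), sep='-')
                         .str.cat(tycho2df.TYC3.astype(str), sep='-'))
tycho2df = tycho2df.rename(columns={'HIP': 'hip'})  # align the HIP column name with the TGAS catalog
print(tycho2df.tycho2_id.tolist())  # ['1-8-1', '2-13-1']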
select data with relative parallax error less than 'cutoff', add absolute magnitude columns for plotting. If catalog is not None, the cutoff on BV will not be applied (ensures initial variable stars DataFrame is not constrained in magnitudes) | def data_process(df_toprocess=None, cutoff=0.2, bv_cutoff=0.15, catalog=None):
print "Selecting objects.."
df_toprocess['sigma_pi/pi'] = df_toprocess.loc[:, 'parallax_error'].astype(float) / df_toprocess.loc[:, 'parallax']\
.astype(float)
print "..Done\nCutoff at relative parallax error of %s\n----------" % cutoff
# only take objects with relative parallax error < cutoff
df_toprocess = df_toprocess.loc[df_toprocess.loc[:, 'parallax'] /
df_toprocess.loc[:, 'parallax_error'] > 1. / cutoff]
print catalog
if catalog is None:
print "Replacing whitespace with nan"
df_toprocess = df_toprocess.replace(' ', np.nan) # some cells are ' ' instead of nan
print "Converting BTmag and VTmag to floats.."
df_toprocess.BTmag = df_toprocess.BTmag.astype(float)
df_toprocess.VTmag = df_toprocess.VTmag.astype(float)
# Some values are NaN:
print "Removing objects with missing BT or VT measurements.."
df_toprocess = df_toprocess[df_toprocess.BTmag.notnull()]
df_toprocess = df_toprocess[df_toprocess.VTmag.notnull()]
print "Computing B-V and M_V.."
df_toprocess['B_V'] = df_toprocess.BTmag - df_toprocess.VTmag
df_toprocess['M_V'] = df_toprocess.VTmag - 5. * (np.log10(1000. / df_toprocess.parallax) - 1.)
print "Converting sigma BT and sigma VT to float.."
df_toprocess.e_BTmag = df_toprocess.e_BTmag.astype(float)
df_toprocess.e_VTmag = df_toprocess.e_VTmag.astype(float)
print "Computing sigma B-V.."
df_toprocess['e_B_V'] = np.sqrt(df_toprocess.e_BTmag.pow(2)+df_toprocess.e_VTmag.pow(2))
print "Applying selection on sigma BT-VT < %s.." % bv_cutoff
df_toprocess = df_toprocess[df_toprocess.e_B_V < bv_cutoff]
if catalog == 'xmatch_TGAS_Simbad.csv':
df_toprocess = df_toprocess.loc[(df_toprocess['J'] < 11.) & (df_toprocess['K'] < 11.)]
print "min in J: %s" % np.max(df_toprocess['J'])
print "max in J: %s" % np.min(df_toprocess['J'])
df_toprocess.insert(10, 'B_V', df_toprocess.loc[:, 'B'] - df_toprocess.loc[:, 'V'])
df_toprocess.insert(10, 'J_K', df_toprocess.loc[:, 'J'] - df_toprocess.loc[:, 'K'])
df_toprocess.insert(10, 'M_G', df_toprocess.loc[:, 'phot_g_mean_mag'] - 5. *
(np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))
df_toprocess.insert(10, 'M_J', df_toprocess.loc[:, 'J'] - 5. *
(np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))
df_toprocess.insert(10, 'M_K', df_toprocess.loc[:, 'K'] - 5. *
(np.log10(1000. / df_toprocess.loc[:, 'parallax']) - 1.))
if catalog == 'xmatch_TGAS_VSX.csv':
df_toprocess = df_toprocess[df_toprocess.V == 0]
print "%s objects selected" % len(df_toprocess)
print "..Done\n----------"
return df_toprocess | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def localize_red_clump(star_catalog,close_cat_idx,log):\n\n def select_within_range(mags, colours, mag_min, mag_max, col_min, col_max):\n \"\"\"Function to identify the set of array indices with values\n between the range indicated\"\"\"\n\n idx1 = np.where(colours >= col_min)[0]\n idx2 = np.where(colours <= col_max)[0]\n idx3 = np.where(mags >= mag_min)[0]\n idx4 = np.where(mags <= mag_max)[0]\n idx = set(idx1).intersection(set(idx2))\n idx = idx.intersection(set(idx3))\n idx = list(idx.intersection(set(idx4)))\n\n return idx\n\n RC = photometry_classes.Star()\n\n inst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n cal_i = star_catalog['imag'][close_cat_idx]\n cal_r = star_catalog['rmag'][close_cat_idx]\n cal_g = star_catalog['gmag'][close_cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gi = inst_g - inst_i\n inst_gr = inst_g - inst_r\n cal_ri = cal_r - cal_i\n cal_gi = cal_g - cal_i\n cal_gr = cal_g - cal_r\n\n log.info('\\n')\n log.info('Localizing the Red Clump')\n log.info('Median (r-i), i: '+str(np.median(inst_ri))+', '+str(np.median(inst_i)))\n log.info('Median (g-i), i: '+str(np.median(inst_gi))+', '+str(np.median(inst_i)))\n log.info('Median (g-r), g: '+str(np.median(inst_gr))+', '+str(np.median(inst_g)))\n\n ri_min = 0.8\n ri_max = 1.2\n i_min = 15.5\n i_max = 16.5\n\n r_min = 16.2\n r_max = 17.5\n\n gi_min = 2.5\n gi_max = 3.5\n\n gr_min = 1.5\n gr_max = 2.2\n g_min = 17.8\n g_max = 19.5\n\n log.info('Selected Red Clump giants between:')\n log.info('i = '+str(i_min)+' to '+str(i_max))\n log.info('r = '+str(r_min)+' to '+str(r_max))\n log.info('(r-i) = '+str(ri_min)+' to '+str(ri_max))\n log.info('g = '+str(g_min)+' to '+str(g_max))\n log.info('(g-r) = '+str(gr_min)+' to '+str(gr_max))\n log.info('(g-i) = '+str(gi_min)+' to '+str(gi_max))\n\n idx = select_within_range(inst_i, inst_ri, i_min, i_max, ri_min, ri_max)\n\n (RC.ri, RC.sig_ri, RC.i, RC.sig_i) = calc_distribution_centroid_and_spread_2d(inst_ri[idx], inst_i[idx], use_iqr=True)\n\n idx = select_within_range(inst_r, inst_ri, r_min, r_max, ri_min, ri_max)\n\n (RC.r, RC.sig_r) = calc_distribution_centre_and_spread(inst_r[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gr, g_min, g_max, gr_min, gr_max)\n\n (RC.gr, RC.sig_gr, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gr[idx], inst_g[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gi, g_min, g_max, gi_min, gi_max)\n\n (RC.gi, RC.sig_gi, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gi[idx], inst_g[idx], use_iqr=True)\n\n log.info('\\n')\n log.info('Centroid of Red Clump Stars at:')\n log.info(RC.summary(show_mags=True))\n log.info(RC.summary(show_mags=False,show_colours=True))\n\n RC.transform_to_JohnsonCousins()\n\n log.info(RC.summary(show_mags=False,johnsons=True))\n\n return RC",
"def do_lowzcut_check(cat, subdir):\n lowzcut = cat.lowzcut\n cat.lowzcut = True\n cat.plot_omega_dla(zmax=5,label=\"Cutting\")\n cat.lowzcut = False\n cat.plot_omega_dla(zmax=5,label=\"Not cutting\")\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"omega_gp_lowz\"))\n plt.clf()\n\n cat.lowzcut = True\n cat.plot_line_density(zmax=5,label=\"Cutting\")\n cat.lowzcut = False\n cat.plot_line_density(zmax=5,label=\"Not cutting\")\n plt.ylim(0,0.12)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"dndx_gp_lowz\"))\n plt.clf()\n cat.lowzcut = lowzcut",
"def FE_find_and_cap_outliers(df, features, drop=False, verbose=False):\r\n df = df.copy(deep=True)\r\n outlier_indices = []\r\n idcol = 'idcol'\r\n df[idcol] = range(len(df))\r\n if isinstance(features, str):\r\n features = [features]\r\n # iterate over features(columns)\r\n for col in features:\r\n # Determine a list of indices of outliers for feature col\r\n thresh = outlier_determine_threshold(df, col)\r\n mask_outliers = is_outlier(df[col], thresh=thresh).astype(int)\r\n dfout_index = df.iloc[np.where(mask_outliers>0)].index\r\n\r\n df['anomaly1'] = 0\r\n df.loc[dfout_index ,'anomaly1'] = 1\r\n\r\n ### this is how the column looks now before capping outliers\r\n if verbose:\r\n fig, (ax1,ax2) = plt.subplots(1,2,figsize=(12,5))\r\n colors = {0:'blue', 1:'red'}\r\n ax1.scatter(df[idcol], df[col], c=df[\"anomaly1\"].apply(lambda x: colors[x]))\r\n ax1.set_xlabel('Row ID')\r\n ax1.set_ylabel('Target values')\r\n ax1.set_title('%s before capping outliers' %col)\r\n\r\n capped_value = df.loc[dfout_index, col].min() ## this is the value we cap it against\r\n df.loc[dfout_index, col] = capped_value ## maximum values are now capped\r\n ### you are now good to go - you can show how they are capped using before and after pics\r\n if verbose:\r\n colors = {0:'blue', 1:'red'}\r\n ax2.scatter(df[idcol], df[col], c=df[\"anomaly1\"].apply(lambda x: colors[x]))\r\n ax2.set_xlabel('Row ID')\r\n ax2.set_ylabel('Target values')\r\n ax2.set_title('%s after capping outliers' %col)\r\n\r\n # Let's save the list of outliers and see if there are some with outliers in multiple columns\r\n outlier_indices.extend(dfout_index)\r\n\r\n # select certain observations containing more than one outlier in 2 columns or more. We can drop them!\r\n outlier_indices = Counter(outlier_indices)\r\n multiple_outliers = list( k for k, v in outlier_indices.items() if v > 3 )\r\n ### now drop these rows altogether ####\r\n df.drop([idcol,'anomaly1'], axis=1, inplace=True)\r\n if drop:\r\n print('Shape of dataframe before outliers being dropped: %s' %(df.shape,))\r\n number_of_rows = df.shape[0]\r\n df.drop(multiple_outliers, axis=0, inplace=True)\r\n print('Shape of dataframe after outliers being dropped: %s' %(df.shape,))\r\n print('\\nNumber_of_rows with multiple outliers in more than 3 columns which were dropped = %d' %(number_of_rows-df.shape[0]))\r\n return df",
"def analyse_colour_mag_diagrams(params,star_catalog,catalog_header,\n target,source,blend,RC,\n det_idx,cat_idx,close_cat_idx,log):\n\n tol = 2.0\n\n filters = { 'ip': 'SDSS-i', 'rp': 'SDSS-r', 'gp': 'SDSS-g' }\n\n inst_i = star_catalog['cal_ref_mag_ip'][det_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][det_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][det_idx]\n cal_i = star_catalog['imag'][cat_idx]\n cal_r = star_catalog['rmag'][cat_idx]\n cal_g = star_catalog['gmag'][cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gr = inst_g - inst_r\n inst_gi = inst_g - inst_i\n cal_ri = cal_r - cal_i\n cal_gr = cal_g - cal_r\n cal_gi = cal_g - cal_i\n\n linst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n linst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n linst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n lcal_i = star_catalog['imag'][close_cat_idx]\n lcal_r = star_catalog['rmag'][close_cat_idx]\n lcal_g = star_catalog['gmag'][close_cat_idx]\n linst_ri = linst_r - linst_i # Catalogue column order is red -> blue\n linst_gr = linst_g - linst_r\n linst_gi = linst_g - linst_i\n lcal_ri = lcal_r - lcal_i\n lcal_gr = lcal_g - lcal_r\n lcal_gi = lcal_g - lcal_i\n\n plot_colour_mag_diagram(params,inst_i, inst_ri, linst_i, linst_ri, target,\n source, blend, RC, 'r', 'i', 'i', tol, log)\n\n plot_colour_mag_diagram(params,inst_r, inst_ri, linst_r, linst_ri, target,\n source, blend, RC, 'r', 'i', 'r', tol, log)\n\n plot_colour_mag_diagram(params,inst_g, inst_gr, linst_g, linst_gr, target,\n source, blend, RC, 'g', 'r', 'g', tol, log)\n\n plot_colour_mag_diagram(params,inst_g, inst_gi, linst_g, linst_gi, target,\n source, blend, RC, 'g', 'i', 'g', tol, log)",
"def get_ptf10iuv(colorplt = False):\n z = 0.0251485\n ebv = 0.0371 # SFD\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n print (\"adopt g band t_max estimated by myself\")\n t_max = 55357.387 \n tb = pd.read_csv('../data/otherSN/Kasliwal2012/PTF10iuv', sep='\\t')\n tb = tb.drop(columns=[\"Unnamed: 4\"])\n tb = tb.rename(columns={'Filter' : 'filter',\n 'MJD': 'mjd'})\n tb = tb[~np.array([x[0]=='>' for x in tb['Mag'].values])]\n tb['mag'] = np.array([float(x.split(\" +or-\")[0]) for x in tb['Mag'].values])\n tb['emag'] = np.array([float(x.split(\" +or-\")[1]) for x in tb['Mag'].values])\n tb = tb.drop(columns=[\"Mag\"])\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n ixz = tb['filter'].values == \"z\"\n ixB = tb['filter'].values == \"B\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb['wave'].values[ixz] = 9670\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n tb = tb.sort_values(by = \"mjd\")\n if colorplt==False:\n return tb\n \n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['g', 'r', 'i']))\n tb = tb[ix]\n tb = tb[tb.mjd > 55352.5]\n tb = tb[tb.mjd < 55593.5]\n \n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"r\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"g\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"r\"]\n itb = tbsub[tbsub[\"filter\"].values==\"i\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"gmr\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"rmi\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb",
"def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :\n if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))\n else : fig,ax=fig\n tbins=[3000,3500,4000,4500,5500,8000,30000] \n hbins=[8,11,12,13,15]\n try: snr = a['SNREV']\n except: snr=a['SNR']\n j=np.where(snr > 300) [0]\n snr[j] = 300\n for i in range(len(tbins)-1) :\n ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)\n for j in range(len(hbins)-1) :\n ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))\n gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &\n (a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &\n (a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]\n print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))\n try :\n #plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')\n ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)\n ax[i,j].set_xlabel('VSCATTER (km/s)')\n ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())\n #ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])\n #ax[i,1].set_xlabel('VSCATTER')\n except : pass\n\n if out is not None : \n fig.savefig(out+'.png')\n plt.close()\n\n fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))\n return fig,ax",
"def completeness(input_sources_cat,detected_sources_cat,output_fname,cat_falsedet,Mag_lim,pix_radius):\n\n #Load catalogues in table\n input_cat=ascii.read('%s.txt' % input_sources_cat)\n detected_cat=ascii.read('%s.cat' % detected_sources_cat) \n #print (input_cat)\n #print (detected_cat)\n print ('Number of sources in stuff catalog below the mag lim of %.2f: %d' % (Mag_lim,len(input_cat[input_cat['MAG']<Mag_lim])))\n print ('Number of sources detected: %d \\n' % len(detected_cat))\n\n #Pixel radius\n pixradius=pix_radius\n\n nb=0\n i=0\n det=np.zeros(len(input_cat))\n x_det_list=np.zeros(len(input_cat))\n y_det_list=np.zeros(len(input_cat))\n mag_sex=np.zeros(len(input_cat))\n\n col_det=Column(name='detected',data=det)\n x_det_coord=Column(name='x_coord_det',data=x_det_list)\n y_det_coord=Column(name='y_coord_det',data=y_det_list)\n mag_det=Column(name='mag_det',data=mag_sex)\n input_cat.add_columns([col_det,x_det_coord,y_det_coord,mag_det])\n\n col_det_sex=Column(name='detected',data=np.zeros(len(detected_cat)))\n detected_cat.add_columns([col_det_sex])\n\n\n for x1, y1 in zip (detected_cat['XPEAK_IMAGE'], detected_cat['YPEAK_IMAGE']):\n #print ('object n. {0:d} at position: {1:.2f}-{2:.2f} \\n'.format(nb,x1,y1))\n min_dist=1e40\n j=0\n x_det=-1;y_det=-1;\n for x2,y2,mag in zip(input_cat['COORD_XPIXEL'],input_cat['COORD_YPIXEL'],input_cat['MAG']):\n if detected_cat['detected'][i]==0 and x1 >= int(x2)-pixradius and x1 <= int(x2)+pixradius and y1 >= int(y2)-pixradius and y1 <= int(y2)+pixradius:\n #Test the minimum distance\n dist=(x2-x1)**2+(y2-y1)**2\n if dist < min_dist:# and detected_cat['MAG_AUTO'][i] > 0.9*mag and detected_cat['MAG_AUTO'][i] < 1.1*mag:\n min_dist=dist\n x_det=x1\n y_det=y1\n mag_det=detected_cat['MAG_AUTO'][i]\n index=j\n j+=1\n if min_dist<1e40:\n nb+=1\n detected_cat['detected'][i]=1\n #print ('Matched sources n. {0:d} at position: {1:.2f}-{2:.2f} \\n'.format(i,x_det,y_det))\n input_cat['detected'][index]=1\n input_cat['x_coord_det'][index]=x_det\n input_cat['y_coord_det'][index]=y_det\n input_cat['mag_det'][index]=mag_det\n else:\n detected_cat['detected'][i]=-1\n #print ('Matched sources n. {0:d} at position: {1:.2f}-{2:.2f} \\n'.format(i,x_det,y_det))\n\n i+=1\n\n\n \"\"\"\n for x1,y1 in zip(input_cat['COORD_YPIXEL'],input_cat['COORD_XPIXEL']):\n nb+=1\n #print ('object n. {0:d} at position: {1:.2f}-{2:.2f} \\n'.format(nb,x1,y1))\n min_dist=1e40\n x_det=-1;y_det=-1;\n j=0\n for x2, y2 in zip (detected_cat['XPEAK_IMAGE'], detected_cat['YPEAK_IMAGE']):\n if detected_cat['detected'][j]==0 and x2 >= int(x1)-pixradius and x2 <= int(x1)+pixradius and y2 >= int(y1)-pixradius and y2 <= int(y1)+pixradius:\n #Test the minimum distance\n dist=(x2-x1)**2+(y2-y1)**2\n if dist < min_dist:\n min_dist=dist\n x_det=x2\n y_det=y2\n mag_det=detected_cat['MAG_AUTO'][j]\n index=j\n j+=1\n \n if min_dist<1e40:\n i+=1\n detected_cat['detected'][index]=1\n #print ('Matched sources n. 
{0:d} at position: {1:.2f}-{2:.2f} \\n'.format(i,x_det,y_det))\n input_cat['detected'][nb-1]=1\n input_cat['x_coord_det'][nb-1]=x_det\n input_cat['y_coord_det'][nb-1]=y_det\n input_cat['mag_det'][nb-1]=mag_det\n \"\"\"\n #Cross match catalog\n print ('Number of sources matched in both catalogs: %d' % nb)\n\n #Write output file\n ascii.write(input_cat,'%s.txt' % output_fname)\n\n\n\n x_false_list=detected_cat['XPEAK_IMAGE'][detected_cat['detected']==-1]\n y_false_list=detected_cat['YPEAK_IMAGE'][detected_cat['detected']==-1]\n mag_sex=detected_cat['MAG_AUTO'][detected_cat['detected']==-1]\n\n #x_det_coord=Column(name='x_coord',data=x_det_list)\n #y_det_coord=Column(name='y_coord',data=y_det_list)\n #mag_det=Column(name='mag_det',data=mag_sex)\n false_det_cat=Table([x_false_list,y_false_list,mag_sex],names=('x_coord','y_coord','mag_det'))\n\n\n #Write false detections in a separated file\n ascii.write(false_det_cat,'%s.txt' % cat_falsedet)",
"def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped",
"def filter_data(df, coords, vel=False, vel_threshold=0.15, radiant_threshold=5):\n\tdf = df[(df.range > 110) & (df.range < 130) \n\t\t& (df.th < 70) # Unphysical, given the range cut.\n\t\t& (df.fl == 0)] # Bad interferometry if fl=1\n\n\t# Velocity cut\n\tif vel:\n\t\tdf = df[(df.new_ptn > vel*(1-vel_threshold)) \n\t\t& (df.new_ptn < vel*(1+vel_threshold))]\n\n\t# Identify the shower radiant\n\tif isinstance(coords, str):\n\t\twavelet = read_wavelet(coords)\n\t\tdf = df.apply(get_wavelet_radiant, axis=1, args=(wavelet,))\n\telse:\n\t\tdf['radiant_ll0'] = coords[0]\n\t\tdf['radiant_beta'] = coords[1]\n\n\tdf['separation'] = df.apply(check_radiant, axis=1)\n\tdf.drop(['radiant_ll0', 'radiant_beta'], axis=1, inplace=True)\n\tdf_shower = df[df['separation'] <= radiant_threshold]\n\treturn df_shower",
"def plot_HDres_histos_vs_z(\n df,\n nameout,\n threshold_var=\"class0\",\n threshold_list=[0.5, 0.7, 0.9],\n threshold_sign=\">\",\n):\n\n P = df[df[\"class0\"] > 0.5]\n Ias = df[df[\"target\"] == 0]\n\n TP = P[P[\"target\"] == 0]\n FP = P[P[\"target\"] != 0]\n\n sel_TP_dic = {}\n sel_FP_dic = {}\n for t in threshold_list:\n if threshold_sign == \">\":\n sel_TP_dic[t] = TP[TP[threshold_var] > t]\n sel_FP_dic[t] = FP[FP[threshold_var] > t]\n else:\n sel_TP_dic[t] = TP[TP[threshold_var] < t]\n sel_FP_dic[t] = FP[FP[threshold_var] < t]\n\n plt.clf()\n cm = CMAP\n fig = plt.figure(figsize=(14, 14))\n # gs = gridspec.GridSpec(4, 2, width_ratios=[3, 1], height_ratios=[2, 2, 1, 1])\n # gs.update(wspace=0.1, hspace=0.3)\n\n # # gridspec init\n # ax00 = plt.subplot(gs[0, 0]) # Hres Ia\n # ax10 = plt.subplot(gs[1, 0], sharex=ax00) # Hres CC\n # ax20 = plt.subplot(gs[2:, 0], sharex=ax00) # efficiency\n # ax01 = plt.subplot(gs[0, 1], sharey=ax00) # histo Ia\n # ax11 = plt.subplot(gs[1, 1], sharey=ax10) # histo CC\n # ax21 = plt.subplot(gs[2, 1]) # histo x1\n # ax31 = plt.subplot(gs[3, 1]) # histo c\n gs = gridspec.GridSpec(3, 3, height_ratios=[2, 2, 1])\n # gs.update(wspace=0.2, hspace=0.1)\n\n # gridspec init\n ax00 = plt.subplot(gs[0, 0:2]) # Hres Ia\n ax10 = plt.subplot(gs[1, 0:2], sharex=ax00) # Hres CC\n ax20 = plt.subplot(gs[2, 0]) # redshift dist\n ax01 = plt.subplot(gs[0, 2], sharey=ax00) # histo Ia\n ax11 = plt.subplot(gs[1, 2], sharey=ax10) # histo CC\n ax21 = plt.subplot(gs[2, 1]) # histo x1\n ax31 = plt.subplot(gs[2, 2]) # histo c\n\n # lines\n ax00.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n ax10.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n\n mubins = np.arange(-2, 2 + 0.1, 0.1)\n\n # Hres w. histogram\n def HRwhisto(\n df, sel_dic, ax_left, ax_right, threshold_sign, ylabel=\"TP\", visible=False\n ):\n if ylabel == \"TP\":\n sntyp = \"Ia\"\n else:\n sntyp = \"CC\"\n ax_left.scatter(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n c=df[\"class0\"],\n cmap=CMAP,\n vmin=0.5,\n vmax=1,\n s=8,\n )\n ax_left.errorbar(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n yerr=df[\"delmu_err\"],\n color=\"gray\",\n zorder=0,\n fmt=\"none\",\n marker=\"none\",\n )\n\n ax_left.set_ylim(-2, 2)\n ax_left.set_xlim(0, 1.2)\n ax_left.set_ylabel(f\"{ylabel} residual\", fontsize=18)\n ax_left.tick_params(labelsize=14)\n plt.setp(ax_left.get_xticklabels(), visible=visible)\n if visible is True:\n ax_left.set_xlabel(\"simulated redshift\", fontsize=18)\n for t in threshold_list:\n sel = sel_dic[t]\n n_SNe = len(sel)\n ax_right.hist(\n sel[\"delmu\"],\n orientation=\"horizontal\",\n histtype=\"step\",\n color=cm(t),\n bins=mubins,\n density=True,\n label=f\"{n_SNe} {sntyp} {threshold_sign} {t}\",\n lw=2,\n )\n ax_right.legend(loc=\"lower center\", prop={\"size\": 13})\n plt.setp(ax_right.get_yticklabels(), visible=False)\n plt.setp(ax_right.get_xticklabels(), visible=False)\n ax_right.plot(\n [ax_right.get_xlim()[0], ax_right.get_xlim()[1]],\n np.zeros(len([ax_right.get_xlim()[0], ax_right.get_xlim()[1]])),\n \"k:\",\n )\n\n HRwhisto(TP, sel_TP_dic, ax00, ax01, threshold_sign, ylabel=\"TP\", visible=False)\n HRwhisto(FP, sel_FP_dic, ax10, ax11, threshold_sign, ylabel=\"FP\", visible=True)\n\n # z histos\n n, bins_to_use, tmp = ax20.hist(\n Ias[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=\"black\", bins=15, lw=3\n )\n\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n sel_FP = sel_FP_dic[t]\n ax20.hist(\n sel_TP[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=cm(t), bins=bins_to_use\n )\n 
ax20.hist(\n sel_FP[\"SIM_REDSHIFT_CMB\"],\n histtype=\"step\",\n color=cm(t),\n linestyle=\"--\",\n bins=bins_to_use,\n )\n ax20.set_xlim(0, 1.2)\n ax20.tick_params(labelsize=14)\n ax20.set_xlabel(\"simulated redshift\", fontsize=18)\n\n # hist stretch\n n, bins_to_use, tmp = ax21.hist(Ias[\"x1\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax21.hist(\n sel_TP[\"x1\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax21.set_xlabel(\"x1\", fontsize=18)\n ax21.yaxis.set_label_position(\"right\")\n ax21.set_xlim(-3, 3)\n ax21.tick_params(labelsize=14)\n # color histo\n n, bins_to_use, tmp = ax31.hist(Ias[\"c\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax31.hist(\n sel_TP[\"c\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax31.set_xlabel(\"c\", fontsize=18)\n ax31.set_xlim(-1, 1)\n ax31.tick_params(labelsize=14)\n ax31.yaxis.set_label_position(\"right\")\n\n gs.tight_layout(fig)\n plt.savefig(nameout)\n plt.close()\n del fig",
"def mut_filter(df, rate, binary_cutoff=12):\n get_min_count = lambda s: s.value_counts().min() if len(s.unique()) > 1 else -1\n df = df[df.apply(get_min_count, axis=1) > binary_cutoff]\n cc = H.screen_feature(rate, rev_kruskal, df)\n\n fc_apply = lambda s: fc(s, rate)\n direction = df.apply(fc_apply, axis=1)\n direction.name = 'direction'\n\n cc = cc.join(direction)\n #cc = cc[cc.direction == False]\n #return cc\n\n df = df.ix[H.true_index((cc.p > .01) | (cc.direction == True))]\n df = df.dropna(axis=1)\n return df",
"def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target",
"def filter_catalogs(conn, catalogs, res): \n # Determine which resolution range the image belongs in\n for config, res_range in res_dict.items():\n if res_range[0] < res <= res_range[1]:\n use_range = res_range\n # Combine highest resolutions to allow for more catalogs\n if config == 'A' or config == 'B':\n use_range = (res_dict['A'][0], res_dict['B'][1])\n\n # Find all catalogs that fall into the adequate resolution range\n cur = conn.cursor()\n filtered_catalogs = []\n for catalog in catalogs:\n try:\n catalog_res = catalogio.catalog_dict[catalog]['resolution']\n except KeyError:\n cur.execute('''SELECT resolution FROM radcat.catalogs\n WHERE name = %s''', (catalog, ))\n catalog_res = cur.fetchone()[0]\n if use_range[0] < catalog_res <= use_range[1]:\n filtered_catalogs.append(catalog)\n\n cur.close()\n\n return filtered_catalogs",
"def add_climatology_cols(df):\n return df",
"def view_marginals_raw(data, label=''):\n variables = ['sao2', 'heartrate', 'respiration', 'systemicmean']\n\n num_gradations = 25\n # for cutoff in the gradations, what fraction of samples (at a given time point) fall into that cutoff bracket?\n grid = np.zeros(shape=(16, num_gradations, 4))\n grid = np.zeros(shape=(16, num_gradations, 4))\n assert data.shape[-1] == 4\n ranges = []\n for var in range(4):\n # allow for a different range per variable (if zoom)\n low = np.min(data[:, :, var])\n high = np.max(data[:, :, var])\n ranges.append([low, high])\n gradations = np.linspace(low, high, num_gradations)\n for (i, cutoff) in enumerate(gradations):\n # take the mean over samples\n frac = ((data[:, :, var] > low) & (data[:, :, var] <= cutoff)).mean(axis=0)\n low = cutoff\n grid[:, i, var] = frac\n\n fig, axarr = plt.subplots(nrows=4, ncols=1, sharex=True)\n axarr[0].imshow(grid[:, :, 0].T, origin='lower', aspect=0.5, cmap='magma_r')\n axarr[1].imshow(grid[:, :, 1].T, origin='lower', aspect=0.5, cmap='magma_r')\n axarr[2].imshow(grid[:, :, 2].T, origin='lower', aspect=0.5, cmap='magma_r')\n axarr[3].imshow(grid[:, :, 3].T, origin='lower', aspect=0.5, cmap='magma_r')\n\n for (var, ax) in enumerate(axarr):\n labels = np.round(np.linspace(ranges[var][0], ranges[var][1], num_gradations)[1::4], 0)\n ax.set_yticks(np.arange(num_gradations)[1::4])\n ax.set_yticklabels(labels)\n ax.set_ylabel(variables[var])\n ax.yaxis.set_ticks_position('none')\n ax.xaxis.set_ticks_position('none')\n ax.set_adjustable('box-forced')\n ax.spines['top'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.grid(b=True, color='black', alpha=0.2, linestyle='--')\n\n axarr[-1].set_xticks(np.arange(16)[::2])\n\n plt.tight_layout(pad=0.0, w_pad=-5.0, h_pad=0.1)\n plt.savefig(\"./experiments/eval/eICU_marginals_\" + label + \".png\")\n\n return True",
"def get_sn2005ek(colorplt=False):\n z = 0.016551\n ebv = 0.210\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n t_max = 53639.9\n print (\"adopt r band t_max from Drout+13\")\n \n # tb = pd.read_csv('/Users/yuhanyao/Desktop/ZTF18abfcmjw/data/Drout2013/table1', sep='\\t')\n # tb = tb.drop(columns=[\"Unnamed: 6\"])\n \n mjds = np.array([53639.3, 53640.3, 53641.3, 53642.2, 53643.2, 53645.3,\n 53646.5, 53648.0, 53649.2, 53650.4, 53651.3, 53652.5,\n 53654.2, 53655.2, 53656.2, 53657.2])\n \n Bmags = np.array([18.25, 18.38, 18.65, np.nan, 19.10, 19.71,\n 20.07, np.nan, 20.67, 20.90, 21.05, np.nan,\n 21.74, np.nan, np.nan, np.nan])\n \n Bmag_uncs = np.array([0.02, 0.03, 0.02, np.nan, 0.05, 0.07, \n 0.07, np.nan, 0.04, 0.04, 0.04, np.nan,\n 0.12, np.nan, np.nan, np.nan])\n \n Vmags = np.array([17.83, 18.03, 17.92, np.nan, 18.24, 18.66,\n 18.93, 19.48, 19.63, 19.86, 19.98, 20.35,\n 20.60, 20.74, 20.88, 21.22])\n \n Vmag_uncs = np.array([0.02, 0.03, 0.01, np.nan, 0.02, 0.02,\n 0.02, 0.06, 0.03, 0.03, 0.04, 0.05, \n 0.08, 0.10, 0.08, 0.13])\n \n Rmags = np.array([17.46, 17.41, 17.60, 17.69, 17.86, 18.18, \n np.nan, 18.83, 19.03, 19.26, 19.48, 19.75,\n 20.08, np.nan, 20.47, np.nan])\n \n Rmag_uncs = np.array([0.01, 0.02, 0.01, 0.02, 0.01, 0.01,\n np.nan, 0.03, 0.02, 0.02, 0.02, 0.04,\n 0.05, np.nan, 0.08, np.nan])\n\n Imags = np.array([17.20, 17.13, 17.18, np.nan, 17.47, 17.71, \n np.nan, 18.13, 18.26, 18.51, 18.61, 18.74, \n 19.01, np.nan, 19.47, np.nan])\n \n Imag_uncs = np.array([0.02, 0.04, 0.02, np.nan, 0.03, 0.02,\n np.nan, 0.06, 0.02, 0.02, 0.02, 0.03,\n 0.05, np.nan, 0.06, np.nan])\n \n mymjds = np.hstack([mjds, mjds, mjds, mjds])\n mymags = np.hstack([Bmags, Vmags, Rmags, Imags])\n myemags = np.hstack([Bmag_uncs, Vmag_uncs, Rmag_uncs, Imag_uncs])\n myfilts = np.hstack([ np.repeat(\"B\", len(Bmags)),\n np.repeat(\"V\", len(Bmags)),\n np.repeat(\"R\", len(Rmags)),\n np.repeat(\"I\", len(Imags)) ])\n ix = ~np.isnan(mymags)\n tb = pd.DataFrame({'mjd': mymjds[ix],\n 'mag': mymags[ix],\n 'emag': myemags[ix],\n \"filter\": myfilts[ix]})\n \n ixB = tb['filter'].values==\"B\"\n ixV = tb['filter'].values==\"V\"\n ixR = tb['filter'].values==\"R\"\n ixI = tb['filter'].values==\"I\"\n \n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixB] = 4359\n tb['wave'].values[ixV] = 5430\n tb['wave'].values[ixR] = 6349\n tb['wave'].values[ixI] = 8797\n \n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb['tmax_rf'] = (tb['mjd'] - t_max) / (1+z)\n if colorplt==False:\n return tb\n else:\n tb = add_datecol(tb)\n ix = np.in1d(tb[\"filter\"].values, np.array(['B', 'R', 'I']))\n tb = tb[ix]\n\n dates = get_date_span(tb)\n datesave = []\n for i in range(len(dates)):\n x = dates[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n if len(tbsub)!=0:\n flts = tbsub['filter'].values\n if \"R\" in flts and np.sum(np.unique(flts))!=1:\n datesave.append(x)\n datesave = np.array(datesave)\n \n mcolor = []\n mcolor_unc = []\n mjds = []\n colorname = []\n for i in range(len(datesave)):\n x = datesave[i]\n ix = tb[\"date\"].values == x\n tbsub = tb[ix]\n gtb = tbsub[tbsub[\"filter\"].values==\"B\"]\n rtb = tbsub[tbsub[\"filter\"].values==\"R\"]\n itb = tbsub[tbsub[\"filter\"].values==\"I\"]\n if len(gtb)!=0:\n gmjds = gtb[\"mjd\"].values\n gmags = gtb[\"mag0\"].values\n gemags = gtb[\"emag\"].values\n gwtgs = 1/gemags**2\n gmag = np.sum(gmags * gwtgs) / np.sum(gwtgs)\n gmjd = np.sum(gmjds * gwtgs) / np.sum(gwtgs)\n 
gemag = 1/ np.sqrt(np.sum(gwtgs))\n if len(rtb)!=0:\n rmjds = rtb[\"mjd\"].values\n rmags = rtb[\"mag0\"].values\n remags = rtb[\"emag\"].values\n rwtgs = 1/remags**2\n rmag = np.sum(rmags * rwtgs) / np.sum(rwtgs)\n rmjd = np.sum(rmjds * rwtgs) / np.sum(rwtgs)\n remag = 1/ np.sqrt(np.sum(rwtgs))\n if len(itb)!=0:\n imjds = itb[\"mjd\"].values\n imags = itb[\"mag0\"].values\n iemags = itb[\"emag\"].values\n iwtgs = 1/iemags**2\n imag = np.sum(imags * iwtgs) / np.sum(iwtgs)\n imjd = np.sum(imjds * iwtgs) / np.sum(iwtgs)\n iemag = 1/ np.sqrt(np.sum(iwtgs))\n if len(gtb)!=0 and len(rtb)!=0:\n mcolor.append(gmag - rmag)\n mjds.append( 0.5 * (gmjd + rmjd) )\n mcolor_unc.append( np.sqrt(gemag**2 + remag**2) )\n colorname.append(\"BmR\")\n if len(rtb)!=0 and len(itb)!=0:\n mcolor.append(rmag - imag)\n mjds.append( 0.5 * (rmjd + imjd) )\n mcolor_unc.append( np.sqrt(remag**2 + iemag**2) )\n colorname.append(\"RmI\")\n \n ctb = Table(data = [mjds, mcolor, mcolor_unc, colorname],\n names = [\"mjd\", \"c\", \"ec\", \"cname\"])\n \n ctb['tmax_rf'] = (ctb['mjd'] - t_max) / (1+z)\n ctb = ctb.to_pandas()\n return ctb",
"def plot_completeness(cat_name,output_name,name_plot,mag_lims,binning_mag,plot,second_cat='no'):\n\n cat=ascii.read('%s.txt' % cat_name)\n mag_bins=np.arange(mag_lims[0],mag_lims[1],binning_mag)\n\n mask=cat['detected']==1\n mag_binned_tot=np.digitize(cat['MAG'],mag_bins,right=True)\n mag_binned_det=np.digitize(cat[mask]['MAG'],mag_bins,right=True)\n\n nb_mag=np.array([ len(np.where(mag_binned_tot==i)[0]) for i in range(1,len(mag_bins)) ])\n nb_mag_det = np.array([ len(np.where(mag_binned_det==i)[0]) for i in range(1,len(mag_bins)) ])\n #mag_tot= np.array([stuff_cat['MAG'][mag_binned_tot == i].mean() for i in range(1, len(mag_bins))])\n #mag_det= np.array([stuff_cat[mask]['MAG'][mag_binned_det == i].mean() for i in range(1, len(mag_bins))])\n print (nb_mag)\n print (nb_mag_det)\n\n #Write completeness result in text file\n np.savetxt('%s.txt' % output_name, list(zip(mag_bins,nb_mag,nb_mag_det)),fmt='%.2f %d %d')\n\n\n mag_bin_plot=(mag_bins[:-1]+mag_bins[1:])/2\n\n import matplotlib.pyplot as plt\n\n # the histogram of the input sources\n n, bins, patches = plt.hist(cat['MAG'], mag_bins, normed=0, facecolor='green', alpha=0.75)\n plt.xlabel('Magnitude')\n plt.ylabel('Nb of sources')\n plt.xlim([mag_bins[0],mag_bins[-1]])\n plt.savefig('results/plots/hist_sources.png')\n #plt.show()\n\n plt.clf()\n plt.plot(mag_bin_plot,nb_mag_det/nb_mag)\n plt.xlabel('Magnitude AB')\n plt.ylabel('Efficiency')\n plt.grid(True)\n plt.savefig('%s.png' % output_name)\n if plot: plt.show()\n\n\n if second_cat != 'no':\n cat2=ascii.read('%s.txt' % second_cat)\n mag_bins2=np.arange(mag_lims[0],mag_lims[1],binning_mag)\n\n mask2=cat2['detected']==1\n mag_binned_tot2=np.digitize(cat2['MAG'],mag_bins2,right=True)\n mag_binned_det2=np.digitize(cat2[mask2]['MAG'],mag_bins2,right=True)\n\n nb_mag2=np.array([ len(np.where(mag_binned_tot2==i)[0]) for i in range(1,len(mag_bins2)) ])\n nb_mag_det2 = np.array([ len(np.where(mag_binned_det2==i)[0]) for i in range(1,len(mag_bins2)) ])\n\n mag_bin_plot2=(mag_bins2[:-1]+mag_bins2[1:])/2\n #print (mag_bin_plot)\n #plt.plot(mag_bin_plot,nb_mag_det/nb_mag,label='seeing=0.7\"',color='red')\n #plt.plot(mag_bin_plot2,nb_mag_det2/nb_mag2,label='seeing=0.1\"',color='green')\n plt.plot(mag_bin_plot,nb_mag_det/nb_mag,label='5.9',color='red')\n plt.plot(mag_bin_plot2,nb_mag_det2/nb_mag2,label='5',color='green')\n plt.xlabel('Magnitude AB')\n plt.ylabel('Efficiency')\n #plt.yscale('log')\n #plt.xscale('log')\n plt.grid(True)\n plt.legend()\n plt.savefig('results/plots/completeness_comp.png')\n if plot: plt.show()",
"def make_photom_catalog_uvis(data, filt, origin=''):\n\tfnarr = [data[key]['filename'] for key in data.keys()]\n\t# Make sure filename does not include path.\n\tif '/' in fnarr[0]:\n\t if 'temp_lacos' in fnarr[0]:\n\t for i in range(len(fnarr)):\n\t file_name = fnarr[i].split('temp_lacos/')[1]\n\t fnarr[i] = file_name\n\t else:\n\t for i in range(len(fnarr)):\n\t file_name = (fnarr[i].split('/'))[len(fnarr[i].split('/'))-1]\n\t fnarr[i] = file_name\n\t\n\tamparr = [data[key]['amp'] for key in data.keys()]\n\tshutarr = [data[key]['shutter'] for key in data.keys()]\n\tmjdarr = [data[key]['mjd_avg'] for key in data.keys()]\n\tmjddeltarr = [data[key]['mjd_deltat'] for key in data.keys()]\n\tchiparr = [data[key]['chip'] for key in data.keys()]\n\taxis1arr = [data[key]['axis1'] for key in data.keys()]\n\taxis2arr = [data[key]['axis2'] for key in data.keys()]\n\txcarr = [data[key]['xc'] for key in data.keys()]\n\tycarr = [data[key]['yc'] for key in data.keys()]\n\txcparr = [data[key]['xcp'] for key in data.keys()]\n\tycparr = [data[key]['ycp'] for key in data.keys()]\n\tbackarr = [data[key]['background'] for key in data.keys()]\n\tbackrmsarr = [data[key]['background_rms'] for key in data.keys()]\n\texptimearr = [data[key]['exptime'] for key in data.keys()]\n\tf1 = [data[key]['flux'][0] for key in data.keys()]\n\tf2 = [data[key]['flux'][1] for key in data.keys()]\n\tf3 = [data[key]['flux'][2] for key in data.keys()]\n\tf4 = [data[key]['flux'][3] for key in data.keys()]\n\tf5 = [data[key]['flux'][4] for key in data.keys()]\n\tf6 = [data[key]['flux'][5] for key in data.keys()]\n\tf7 = [data[key]['flux'][6] for key in data.keys()]\n\tf8 = [data[key]['flux'][7] for key in data.keys()]\n\tf9 = [data[key]['flux'][8] for key in data.keys()]\n\tf10 = [data[key]['flux'][9] for key in data.keys()]\n\tf12 = [data[key]['flux'][10] for key in data.keys()]\n\tf14 = [data[key]['flux'][11] for key in data.keys()]\n\tf16 = [data[key]['flux'][12] for key in data.keys()]\n\tf18 = [data[key]['flux'][13] for key in data.keys()]\n\tf20 = [data[key]['flux'][14] for key in data.keys()]\n\tf24 = [data[key]['flux'][15] for key in data.keys()]\n\tf28 = [data[key]['flux'][16] for key in data.keys()]\n\tf32 = [data[key]['flux'][17] for key in data.keys()]\n\tf36 = [data[key]['flux'][18] for key in data.keys()]\n\tf40 = [data[key]['flux'][19] for key in data.keys()]\n\tf45 = [data[key]['flux'][20] for key in data.keys()]\n\tf50 = [data[key]['flux'][21] for key in data.keys()]\n\tf55 = [data[key]['flux'][22] for key in data.keys()]\n\tf60 = [data[key]['flux'][23] for key in data.keys()]\n\tf65 = [data[key]['flux'][24] for key in data.keys()]\n\tf70 = [data[key]['flux'][25] for key in data.keys()]\n \n\tm1 = [data[key]['mag'][0] for key in data.keys()]\n\tm2 = [data[key]['mag'][1] for key in data.keys()]\n\tm3 = [data[key]['mag'][2] for key in data.keys()]\n\tm4 = [data[key]['mag'][3] for key in data.keys()]\n\tm5 = [data[key]['mag'][4] for key in data.keys()]\n\tm6 = [data[key]['mag'][5] for key in data.keys()]\n\tm7 = [data[key]['mag'][6] for key in data.keys()]\n\tm8 = [data[key]['mag'][7] for key in data.keys()]\n\tm9 = [data[key]['mag'][8] for key in data.keys()]\n\tm10 = [data[key]['mag'][9] for key in data.keys()]\n\tm12 = [data[key]['mag'][10] for key in data.keys()]\n\tm14 = [data[key]['mag'][11] for key in data.keys()]\n\tm16 = [data[key]['mag'][12] for key in data.keys()]\n\tm18 = [data[key]['mag'][13] for key in data.keys()]\n\tm20 = [data[key]['mag'][14] for key in data.keys()]\n\tm24 = [data[key]['mag'][15] for 
key in data.keys()]\n\tm28 = [data[key]['mag'][16] for key in data.keys()]\n\tm32 = [data[key]['mag'][17] for key in data.keys()]\n\tm36 = [data[key]['mag'][18] for key in data.keys()]\n\tm40 = [data[key]['mag'][19] for key in data.keys()]\n\tm45 = [data[key]['mag'][20] for key in data.keys()]\n\tm50 = [data[key]['mag'][21] for key in data.keys()]\n\tm55 = [data[key]['mag'][22] for key in data.keys()]\n\tm60 = [data[key]['mag'][23] for key in data.keys()]\n\tm65 = [data[key]['mag'][24] for key in data.keys()]\n\tm70 = [data[key]['mag'][25] for key in data.keys()]\n\t\n\tm1_err = [data[key]['merr'][0] for key in data.keys()]\n\tm2_err = [data[key]['merr'][1] for key in data.keys()]\n\tm3_err = [data[key]['merr'][2] for key in data.keys()]\n\tm4_err = [data[key]['merr'][3] for key in data.keys()]\n\tm5_err = [data[key]['merr'][4] for key in data.keys()]\n\tm6_err = [data[key]['merr'][5] for key in data.keys()]\n\tm7_err = [data[key]['merr'][6] for key in data.keys()]\n\tm8_err = [data[key]['merr'][7] for key in data.keys()]\n\tm9_err = [data[key]['merr'][8] for key in data.keys()]\n\tm10_err = [data[key]['merr'][9] for key in data.keys()]\n\tm12_err = [data[key]['merr'][10] for key in data.keys()]\n\tm14_err = [data[key]['merr'][11] for key in data.keys()]\n\tm16_err = [data[key]['merr'][12] for key in data.keys()]\n\tm18_err = [data[key]['merr'][13] for key in data.keys()]\n\tm20_err = [data[key]['merr'][14] for key in data.keys()]\n\tm24_err = [data[key]['merr'][15] for key in data.keys()]\n\tm28_err = [data[key]['merr'][16] for key in data.keys()]\n\tm32_err = [data[key]['merr'][17] for key in data.keys()]\n\tm36_err = [data[key]['merr'][18] for key in data.keys()]\n\tm40_err = [data[key]['merr'][19] for key in data.keys()]\n\tm45_err = [data[key]['merr'][20] for key in data.keys()]\n\tm50_err = [data[key]['merr'][21] for key in data.keys()]\n\tm55_err = [data[key]['merr'][22] for key in data.keys()]\n\tm60_err = [data[key]['merr'][23] for key in data.keys()]\n\tm65_err = [data[key]['merr'][24] for key in data.keys()]\n\tm70_err = [data[key]['merr'][25] for key in data.keys()]\n \n\ttt = {'#filename':fnarr, 'amp':amparr, 'shutter':shutarr, \\\n\t 'mjd_avg':mjdarr, 'mjd_deltat':mjddeltarr, 'chip':chiparr, \\\n\t 'axis1':axis1arr, 'axis2':axis2arr,'xc':xcarr, 'yc':ycarr, \\\n\t 'xcp':xcparr, 'ycp':ycparr, 'background':backarr, \\\n\t 'background_rms':backrmsarr, 'exptime':exptimearr, \\\n 'f1':f1, 'f2':f2, 'f3':f3,'f4':f4,'f5':f5,'f6':f6,'f7':f7,'f8':f8,\\\n 'f9':f9,'f10':f10,'f12':f12,'f14':f14,'f16':f16,'f18':f18,'f20':f20,\\\n 'f24':f24,'f28':f28,'f32':f32,'f36':f36,'f40':f40,'f45':f45,\\\n 'f50':f50,'f55':f55,'f60':f60,'f65':f65,'f70':f70,'m1':m1, 'm2':m2, \\\n 'm3':m3,'m4':m4,'m5':m5,'m6':m6,'m7':m7,'m8':m8,'m9':m9,'m10':m10,\\\n 'm12':m12,'m14':m14,'m16':m16,'m18':m18,'m20':m20,'m24':m24,\\\n 'm28':m28,'m32':m32,'m36':m36,'m40':m40,'m45':m45,'m50':m50,\\\n 'm55':m55,'m60':m60,'m65':m65,'m70':m70,'m1err':m1_err, \\\n 'm2err':m2_err, 'm3err':m3_err,'m4err':m4_err,'m5err':m5_err,\\\n 'm6err':m6_err,'m7err':m7_err,'m8err':m8_err,'m9err':m9_err,\\\n 'm10err':m10_err,'m12err':m12_err,'m14err':m14_err,'m16err':m16_err,\\\n 'm18err':m18_err,'m20err':m20_err,'m24err':m24_err,'m28err':m28_err,\\\n 'm32err':m32_err,'m36err':m36_err,'m40err':m40_err,'m45err':m45_err,\\\n 'm50err':m50_err,'m55err':m55_err,'m60err':m60_err,'m65err':m65_err,\\\n 'm70err':m70_err}\n\n\tascii.write(tt, origin+filt+'_photcat.dat', \\\n\t names=['#filename','amp','shutter','mjd_avg','mjd_deltat',\\\n\t 
'chip','axis1','axis2','xc','yc','xcp','ycp',\\\n\t 'background','background_rms','exptime', \\\n 'f1','f2','f3','f4','f5','f6','f7','f8','f9','f10',\\\n 'f12','f14','f16','f18','f20','f24','f28','f32','f36',\\\n 'f40','f45','f50','f55','f60','f65','f70',\\\n 'm1','m2','m3','m4','m5','m6','m7','m8','m9','m10',\\\n 'm12','m14','m16','m18','m20','m24','m28','m32','m36',\\\n 'm40','m45','m50','m55','m60','m65','m70','m1err',\\\n 'm2err','m3err','m4err','m5err','m6err','m7err',\\\n 'm8err','m9err','m10err','m12err','m14err','m16err',\\\n 'm18err','m20err','m24err','m28err','m32err','m36err',\\\n 'm40err','m45err','m50err','m55err','m60err','m65err',\\\n 'm70err'], \\\n formats={'#filename':'%s','amp':'%s','shutter':'%s',\\\n 'mjd_avg':'%9.4f','mjd_deltat':'%6.4f','chip':'%i',\\\n 'axis1':'%i','axis2':'%i','xc':'%8.3f','yc':'%8.3f',\\\n 'xcp':'%8.3f','ycp':'%8.3f', 'background':'%0.5f',\\\n 'background_rms':'%0.5f', 'exptime':'%0.2f', \\\n 'f1':'%0.3f', 'f2':'%0.3f','f3':'%0.3f','f4':'%0.3f',\\\n 'f5':'%0.3f','f6':'%0.3f','f7':'%0.3f','f8':'%0.3f',\\\n 'f9':'%0.3f','f10':'%0.3f','f12':'%0.3f',\\\n 'f14':'%0.3f','f16':'%0.3f','f18':'%0.3f',\\\n 'f20':'%0.3f','f24':'%0.3f','f28':'%0.3f',\\\n 'f32':'%0.3f','f36':'%0.3f','f40':'%0.3f',\\\n 'f45':'%0.3f','f50':'%0.3f','f55':'%0.3f',\\\n 'f60':'%0.3f','f65':'%0.3f','f70':'%0.3f',\\\n 'm1':'%0.3f','m2':'%0.3f','m3':'%0.3f','m4':'%0.3f',\\\n 'm5':'%0.3f','m6':'%0.3f','m7':'%0.3f','m8':'%0.3f',\\\n 'm9':'%0.3f','m10':'%0.3f','m12':'%0.3f',\\\n 'm14':'%0.3f','m16':'%0.3f','m18':'%0.3f',\\\n 'm20':'%0.3f','m24':'%0.3f','m28':'%0.3f',\\\n 'm32':'%0.3f','m36':'%0.3f','m40':'%0.3f',\\\n 'm45':'%0.3f','m50':'%0.3f','m55':'%0.3f',\\\n 'm60':'%0.3f','m65':'%0.3f','m70':'%0.3f', \\\n 'm1err':'%0.3f', 'm2err':'%0.3f','m3err':'%0.3f',\\\n 'm4err':'%0.3f','m5err':'%0.3f','m6err':'%0.3f',\\\n 'm7err':'%0.3f','m8err':'%0.3f','m9err':'%0.3f',\\\n 'm10err':'%0.3f','m12err':'%0.3f','m14err':'%0.3f',\\\n 'm16err':'%0.3f','m18err':'%0.3f','m20err':'%0.3f',\\\n 'm24err':'%0.3f','m28err':'%0.3f','m32err':'%0.3f',\\\n 'm36err':'%0.3f','m40err':'%0.3f','m45err':'%0.3f',\\\n 'm50err':'%0.3f','m55err':'%0.3f','m60err':'%0.3f',\\\n 'm65err':'%0.3f','m70err':'%0.3f'})",
"def apolco(a,minfeh=-3,out=None) :\n apo=np.where((a['TELESCOPE'] == 'apo25m') & (a['RV_FEH']>minfeh) )[0]\n fig=vscat(a[apo],marker='o',density=True)\n lco=np.where((a['TELESCOPE'] == 'lco25m') & (a['RV_FEH']>minfeh) )[0]\n vscat(a[lco],fig=fig,ls=':',marker='+',density=True)\n if out is not None : \n fig[0].savefig(out+'_1.png')\n plt.close()\n i1,i2=match.match(a['APOGEE_ID'][apo],a['APOGEE_ID'][lco])\n print('matched {:d} stars'.format(len(i1)))\n fig,ax=plots.multi(1,2)\n #plots.plotp(ax[0,0],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-3,3],yt=r'$\\Delta$ VHELIO_AVG',xt='S/N')\n #plots.plotp(ax[0,1],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-50,50],yt=r'$\\Delta$ VHELIO_AVG',xt='S/N')\n #plots.plotp(ax[1,0],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-0.5,0.5],yt=r'$\\Delta$ VSCATTER',xt='S/N')\n #plots.plotp(ax[1,1],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-5,5],yt=r'$\\Delta$ VSCATTER',xt='S/N')\n ax[0].hist(a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],bins=np.arange(-0.5,0.5,0.02),histtype='step')\n ax[0].set_xlabel(r'$\\Delta$ VHELIO_AVG')\n ax[1].hist(a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],bins=np.arange(-0.25,0.25,0.01),histtype='step')\n ax[1].set_xlabel(r'$\\Delta$ VSCATTER')\n if out is not None : \n fig.savefig(out+'_2.png')\n plt.close()",
"def show_trap_results():\n df_grid = pd.read_hdf('./temp_results.h5', '/optimize_grid')\n print(df_grid)\n \n print('Minimum fwhm:')\n print(df_grid[df_grid.fwhm_ovr_mean==df_grid.fwhm_ovr_mean.min()])\n \n plt.plot(df_grid.e_fit, df_grid.fwhm_ovr_mean, '.b')\n plt.show()",
"def plot_selected(df, title='title', columns=[], shouldNormalize = True, symbol='any stock'):\n #df = df[columns][start_index:end_index]\n #df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel=\"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:,['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n #print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)",
"def __init__(self, x=0, y=0, flux=None, time=None, wcs=None, quality=None, mask=None, exposure=1800, sector=0,\n size=150,\n camera=1, ccd=1, cadence=None):\n super(Source, self).__init__()\n if cadence is None:\n cadence = []\n if quality is None:\n quality = []\n if wcs is None:\n wcs = []\n if time is None:\n time = []\n if flux is None:\n flux = []\n\n self.size = size\n self.sector = sector\n self.camera = camera\n self.ccd = ccd\n self.cadence = cadence\n self.quality = quality\n self.exposure = exposure\n self.wcs = wcs\n co1 = 38.5\n co2 = 116.5\n catalog_1 = self.search_gaia(x, y, co1, co1)\n catalog_2 = self.search_gaia(x, y, co1, co2)\n catalog_3 = self.search_gaia(x, y, co2, co1)\n catalog_4 = self.search_gaia(x, y, co2, co2)\n catalogdata = vstack([catalog_1, catalog_2, catalog_3, catalog_4], join_type='exact')\n catalogdata = unique(catalogdata, keys='DESIGNATION')\n coord = wcs.pixel_to_world([x + (size - 1) / 2 + 44], [y + (size - 1) / 2])[0].to_string()\n ra = float(coord.split()[0])\n dec = float(coord.split()[1])\n catalogdata_tic = tic_advanced_search_position_rows(ra=ra, dec=dec, radius=(self.size + 2) * 21 * 0.707 / 3600)\n # print(f'no_of_stars={len(catalogdata_tic)}, camera={camera}, ccd={ccd}: ra={ra}, dec={dec}, radius={(self.size + 2) * 21 * 0.707 / 3600}')\n self.tic = convert_gaia_id(catalogdata_tic)\n self.flux = flux[:, y:y + size, x:x + size]\n self.mask = mask[y:y + size, x:x + size]\n self.time = np.array(time)\n median_time = np.median(self.time)\n interval = (median_time - 388.5) / 365.25\n\n num_gaia = len(catalogdata)\n tic_id = np.zeros(num_gaia)\n x_gaia = np.zeros(num_gaia)\n y_gaia = np.zeros(num_gaia)\n tess_mag = np.zeros(num_gaia)\n in_frame = [True] * num_gaia\n for i, designation in enumerate(catalogdata['DESIGNATION']):\n ra = catalogdata['ra'][i]\n dec = catalogdata['dec'][i]\n if not np.isnan(catalogdata['pmra'].mask[i]): # masked?\n ra += catalogdata['pmra'][i] * np.cos(np.deg2rad(dec)) * interval / 1000 / 3600\n if not np.isnan(catalogdata['pmdec'].mask[i]):\n dec += catalogdata['pmdec'][i] * interval / 1000 / 3600\n pixel = self.wcs.all_world2pix(\n np.array([catalogdata['ra'][i], catalogdata['dec'][i]]).reshape((1, 2)), 0, quiet=True)\n x_gaia[i] = pixel[0][0] - x - 44\n y_gaia[i] = pixel[0][1] - y\n try:\n tic_id[i] = catalogdata_tic['ID'][np.where(catalogdata_tic['GAIA'] == designation.split()[2])[0][0]]\n except:\n tic_id[i] = np.nan\n if np.isnan(catalogdata['phot_g_mean_mag'][i]):\n in_frame[i] = False\n elif catalogdata['phot_g_mean_mag'][i] >= 25:\n in_frame[i] = False\n elif -4 < x_gaia[i] < self.size + 3 and -4 < y_gaia[i] < self.size + 3:\n dif = catalogdata['phot_bp_mean_mag'][i] - catalogdata['phot_rp_mean_mag'][i]\n tess_mag[i] = catalogdata['phot_g_mean_mag'][\n i] - 0.00522555 * dif ** 3 + 0.0891337 * dif ** 2 - 0.633923 * dif + 0.0324473\n if np.isnan(tess_mag[i]):\n tess_mag[i] = catalogdata['phot_g_mean_mag'][i] - 0.430\n if np.isnan(tess_mag[i]):\n in_frame[i] = False\n else:\n in_frame[i] = False\n\n tess_flux = 10 ** (- tess_mag / 2.5)\n t = Table()\n t[f'tess_mag'] = tess_mag[in_frame]\n t[f'tess_flux'] = tess_flux[in_frame]\n t[f'tess_flux_ratio'] = tess_flux[in_frame] / np.nanmax(tess_flux[in_frame])\n t[f'sector_{self.sector}_x'] = x_gaia[in_frame]\n t[f'sector_{self.sector}_y'] = y_gaia[in_frame]\n catalogdata = hstack([catalogdata[in_frame], t]) # TODO: sorting not sorting all columns\n catalogdata.sort('tess_mag')\n self.gaia = catalogdata",
"def identify_flux(xyz: list) -> list:\n flagged_lines = [tup for tup in xyz if abs(tup[3]) > THRESHOLDS[0] and abs(tup[4]) > THRESHOLDS[1]]\n\n return flagged_lines",
"def drop_attributes(df, cutoff=25, extra_add=[]):\n\n df_copy = df.copy()\n\n attributs_drop = []\n for var in sorted(df.columns):\n series = df[var]\n perc_missing = 100 - series.count() / len(series) * 100\n\n if perc_missing > cutoff:\n attributs_drop.append(var)\n else:\n continue\n\n if len(extra_add) == 0:\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n else:\n attributs_drop = attributs_drop + extra_add\n df_copy.drop(attributs_drop, axis=1, inplace=True)\n\n return df_copy",
"def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models",
"def cn_filter(df, binary_cutoff=12):\n del_df = (df.ix['Deletion'].dropna(1) < 0).astype(int)\n del_df = del_df[del_df.sum(1) >= binary_cutoff]\n del_df.index = del_df.index.droplevel(1)\n del_df = del_df.T\n amp_df = (df.ix['Amplification'].dropna(1) > 0).astype(int)\n amp_df = amp_df[amp_df.sum(1) >= binary_cutoff]\n amp_df.index = amp_df.index.droplevel(1)\n amp_df = amp_df.T\n return amp_df, del_df",
"def plot_selected(df, title='title', columns=[], shouldNormalize=True, symbol='any stock'):\n # df = df[columns][start_index:end_index]\n # df = df.loc[start_index:end_index, columns]\n df = df.loc[:, columns]\n ylabel = \"Price\"\n normal = \"un normalized\"\n if shouldNormalize:\n df = normalize(df.loc[:, ['Close', 'sma200']])\n ylabel = \"%\"\n normal = \"normalized\"\n # print('df.shape in plot=',df.shape)\n plot_data(df, title=title, ylabel=ylabel)",
"def plot_lc_per_aperture(\n self,\n sector=None,\n kwargs={\"aper_radius\": 1, \"percentile\": 84, \"threshold_sigma\": 3},\n apertures=[\"pipeline\", \"round\", \"square\", \"percentile\", \"threshold\"],\n return_lcs=False,\n ):\n sector = self.sector if sector is None else sector\n nrows = len(apertures)\n fig, axs = pl.subplots(\n nrows=nrows,\n ncols=2,\n figsize=(10, nrows * 2),\n constrained_layout=True,\n gridspec_kw={\"width_ratios\": [3, 1], \"hspace\": 0, \"wspace\": 0},\n )\n custom_lcs = {}\n for n, sap_mask in enumerate(apertures):\n ax1 = axs[n, 0]\n lc = self.make_custom_lc(\n sector=sector, sap_mask=sap_mask, **kwargs\n )\n lc.scatter(ax=ax1, label=sap_mask)\n print(f\"mask={sap_mask}; contratio={self.contratio:.2f}\")\n custom_lcs[sap_mask] = lc\n if n != len(apertures) - 1:\n ax1.set_xlabel(\"\")\n ax1.set_xticklabels(\"\")\n if n == 0:\n ax1.set_title(f\"{self.target_name} (sector {sector})\")\n if self.tpf is None:\n tpf = self.get_tpf()\n else:\n tpf = self.tpf\n img = np.nanmedian(self.tpf.flux, axis=0)\n\n ax2 = axs[n, 1]\n ax = plot_aperture_outline(\n img, mask=self.aper_mask, imgwcs=tpf.wcs, ax=ax2\n )\n ax.axis(\"off\")\n if return_lcs:\n return fig, custom_lcs\n else:\n return fig",
"def control_variation(df, outDir, features_to_analyse, \n variables_to_analyse=[\"date_yyyymmdd\"], \n remove_outliers=True, \n p_value_threshold=0.05, \n PCs_to_keep=10):\n \n # Record non-data columns before dropping feature columns \n other_colnames = [col for col in df.columns if col not in features_to_analyse]\n \n # Drop columns that contain only zeros\n colnames_before = list(df.columns)\n AllZeroFeats = df[features_to_analyse].columns[(df[features_to_analyse] == 0).all()]\n df = df.drop(columns=AllZeroFeats)\n colnames_after = list(df.columns)\n zero_cols = [col for col in colnames_before if col not in colnames_after]\n if len(zero_cols) > 0:\n print(\"Dropped %d features with all-zero summaries:\\n%s\" % (len(zero_cols), zero_cols))\n \n # Record feature column names after dropping zero data\n features_to_analyse = [feat for feat in df.columns if feat not in other_colnames]\n \n # Remove outliers from the dataset \n if remove_outliers:\n df, indsOutliers = removeOutliersMahalanobis(df, features_to_analyse)\n remove_outliers = False \n # NB: Ensure Mahalanobis operation to remove outliers is performed only once!\n\n # Check for normality in features to analyse in order decide which \n # statistical test to use: one-way ANOVA (parametric) or Kruskal-Wallis \n # (non-parametric) test\n TEST = check_normality(df, features_to_analyse, p_value_threshold)\n\n # Record name of statistical test used (kruskal/f_oneway)\n test_name = str(TEST).split(' ')[1].split('.')[-1].split('(')[0].split('\\'')[0]\n\n # CONTROL VARIATION: STATS (ANOVAs)\n # - Does N2 worm behaviour on control vary across experiment days? \n # (worms are larger? Shorter L1 diapuase? Camera focus/FOV adjusted? Skewed by non-worm tracked objects?\n # Did not record time when worms were refed! Could be this. If so, worms will be bigger across all foods on that day) \n # - Perform ANOVA to see if features vary across imaging days for control\n # - Perform Tukey HSD post-hoc analyses for pairwise differences between imaging days\n # - Highlight outlier imaging days and investigate reasons why\n # - Save list of top significant features for outlier days - are they size-related features?\n for grouping_variable in variables_to_analyse:\n print(\"\\nTESTING: %s\\n\" % grouping_variable)\n \n if not len(df[grouping_variable].unique()) > 1:\n print(\"Need at least two groups for stats to investigate %s\" % grouping_variable)\n else:\n print(\"Performing %s tests for '%s'\" % (test_name, grouping_variable)) \n \n test_results_df, sigfeats_out = \\\n topfeats_ANOVA_by_group(df, \n grouping_variable, \n features_to_analyse,\n TEST,\n p_value_threshold)\n \n # Ensure directory exists to save results\n Path(outDir).mkdir(exist_ok=True, parents=True)\n \n # Define outpaths\n froot = 'control_variation_in_' + grouping_variable + '_' + test_name\n stats_outpath = outDir / (froot + \"_results.csv\")\n sigfeats_outpath = outDir / (froot + \"_significant_features.csv\")\n \n # Save test statistics + significant features list to file\n test_results_df.to_csv(stats_outpath)\n sigfeats_out.to_csv(sigfeats_outpath, header=False)\n\n # Box plots\n plotDir = outDir / \"Plots\"\n topfeats_boxplots_by_group(df, \n test_results_df, \n grouping_variable,\n plot_save_dir=plotDir, #save to plotDir\n p_value_threshold=p_value_threshold)\n \n # PCA (coloured by grouping variable, eg. experiment date)\n df = doPCA(df, \n grouping_variable, \n features_to_analyse,\n plot_save_dir = plotDir,\n PCs_to_keep = PCs_to_keep)",
"def volcano_plotter():\n print(\"this is volcano plotter\")\n from math import log\n with open(\"../bob/processed/24h_bobdata_ed2_volcano.csv\", \"w\") as outF:\n outF.write(\"Gene log2FoldChange pvalue\\n\")\n with open(\"../bob/processed/24h_bobdata_ed2.csv\", \"r\") as inpF:\n skipFlag = True\n missCount = 1\n for inpLine in inpF:\n if skipFlag:\n skipFlag = False\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n try:\n curLine.append(float(inpI.strip(\"\\\"\\n \")))\n except ValueError:\n curLine.append(inpI.strip(\"\\\"\\n \")) # by this point, each line in the entry file is processed into a neat list\n if curLine[2] == \"\": # if no gene name is given, just add a placeholder\n curLine[2] = \"Noname\" + str(missCount)\n missCount += 1\n # calculate log2foldChange here:\n try:\n FAvg = (curLine[4] + curLine[5] + curLine[6])/3.0 # KO\n SAvg = (curLine[7] + curLine[8] + curLine[9])/3.0 # WT\n except TypeError:\n print(curLine)\n raise\n logFoldChange = log(SAvg/FAvg,2) # so positive numbers are more abundant in the wt cells, negatives number in the KO, at least for the 24H bobdata file\n outF.write(curLine[2] + \" \" + str(logFoldChange) + \" \" + str(curLine[10]) + \"\\n\") # write out results to file"
] | [
"0.5334454",
"0.5270723",
"0.526012",
"0.5255001",
"0.51794606",
"0.51098704",
"0.50803125",
"0.5008346",
"0.49690634",
"0.49599707",
"0.49506727",
"0.49429023",
"0.49181986",
"0.49170044",
"0.4914409",
"0.48817256",
"0.4856762",
"0.48513559",
"0.48380238",
"0.48282054",
"0.48239675",
"0.48133162",
"0.48073754",
"0.47983837",
"0.4748099",
"0.47433808",
"0.47298405",
"0.47141623",
"0.47028458",
"0.4694439"
] | 0.5875681 | 0 |