Dataset schema:
- query: string, lengths 9 to 9.05k
- document: string, lengths 10 to 222k
- metadata: dict
- negatives: list, length 30
- negative_scores: list, length 30
- document_score: string, lengths 4 to 10
- document_rank: string, 2 distinct values
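
The columns above describe a retrieval-training dump: each row holds a natural-language query, the positive document it should retrieve, thirty mined negative documents with their retrieval scores, the positive's own score, and its rank, while the per-row metadata marks the intended objective as triplet-style over (query, document, negatives). A minimal sketch of consuming such rows follows; the file name data.jsonl, the JSON Lines layout, and the 0.95 score margin are illustrative assumptions, not details taken from this dump.

```python
import json

# Hypothetical path: the actual file or dataset name is not shown in this preview.
DATA_PATH = "data.jsonl"


def load_rows(path):
    """Yield one record per line, assuming the rows are stored as JSON Lines."""
    with open(path, "r", encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)


def build_triplets(row, margin=0.95):
    """Turn one record into (query, positive, negative) triplets.

    Negatives whose retrieval score comes too close to the positive's score
    are skipped, on the assumption that they may be unlabeled positives.
    The 0.95 margin is an illustrative choice, not a value from the dataset.
    """
    query = row["query"]
    positive = row["document"]
    pos_score = float(row["document_score"])  # stored as a string in the preview
    triplets = []
    for neg, neg_score in zip(row["negatives"], row["negative_scores"]):
        if float(neg_score) < margin * pos_score:
            triplets.append((query, positive, neg))
    return triplets


if __name__ == "__main__":
    total = 0
    for row in load_rows(DATA_PATH):
        total += len(build_triplets(row))
    print(f"built {total} training triplets")
```

Dropping negatives whose score approaches the positive's is a common precaution when training on mined negatives, since such near-ties are often unlabeled positives; the margin is worth tuning per dataset. The raw rows below are reproduced as-is.
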
various right embeding of video
def test_embed_ok(self): self.go200('minus_upload') self.formfile('minus_upload', 'file', AUDIO_FILE) self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) self.submit200() self.notfind("Невірний") self.show() self.find("youtube_video") self.find("<object width") self.go200('minus_upload') self.formfile('minus_upload', 'file', NOTAGS_FILE) self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) self.submit200() self.notfind("Невірний") self.show() self.find("<object width")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_show_video(name, vext='.mp4', ext='.png', loop=True, autoplay=True, controls=True, embed=False, figpath=figpath, **kwargs):\n import os\n from IPython.core.display import display, Image, HTML\n from base64 import b64encode\n\n opts = 'playsinline '\n if loop: opts += 'loop '\n if autoplay: opts += 'autoplay '\n if controls: opts += 'controls '\n if embed:\n try:\n with open(os.path.join(figpath, name + ext), \"rb\") as image_file:\n im1 = b64encode(image_file.read()).decode(\"utf-8\")\n with open(os.path.join(figpath, name + '_cube' + ext), \"rb\") as image_file:\n im2 = b64encode(image_file.read()).decode(\"utf-8\")\n with open(os.path.join(figpath, name + vext), \"rb\") as video_file:\n im3 = b64encode(video_file.read()).decode(\"utf-8\")\n\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr>\n <td width=33%%><center><img src=\"data:image/png;base64,{0}\" width=100%/></td>\n <td rowspan=2 colspan=2><center><video src=\"data:video/webm;base64,{1}\" {2} type=\"video/{3}\" width=100%/></td>\n </tr>\n <tr>\n <td><center><img src=\"data:image/png;base64,{4}\" width=100%/></td>\n </tr>\n </table></center>\"\"\".format(im1, im3, opts, vext[1:], im2)\n # display(HTML(s))\n except:\n video = open(os.path.join(figpath, name + vext), \"rb\").read()\n video_encoded = b64encode(video).decode(\"utf-8\")\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr> <td width=100%><center><video {0} src=\"data:video/{1};base64,{2}\" width=100%\\>\n </td></tr></table></center>\"\"\".format(opts, vext[1:], video_encoded)\n # display(HTML(s))\n else:\n\n if os.path.isfile(os.path.join(figpath, name + ext)) and os.path.isfile(os.path.join(figpath, name + '_cube' + ext)):\n if os.path.isfile(os.path.join(figpath, name + vext)):\n s = f\"\"\"\n <center><table border=none width=100% height=100%>\n <tr>\n <td width=33%%><center><img src=\"{os.path.join(figpath, name + ext)}\" width=100%/></td>\n <td rowspan=2 colspan=2><center>\n <video width=100% {opts}>\n <source src=\"{os.path.join(figpath, name + vext)}\" type=\"video/{vext[1:]}\">\n Your browser does not support the video tag.\n </video>\n </td>\n </tr>\n <tr>\n <td><center><img src=\"{os.path.join(figpath, name + '_cube' + ext)}\" width=100%/></td>\n </tr>\n </table></center>\"\"\"\n else:\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr>\n <td width=50%%><center><img src=\"{0}\" width=100%/></td>\n <td><center><img src=\"{1}\" width=100%/></td>\n </tr>\n </table></center>\"\"\".format(os.path.join(figpath, name + ext),\n os.path.join(figpath, name + '_cube' + ext))\n else:\n s = \"\"\"\n <center><table border=none width=100% height=100%>\n <tr> <td width=100%><center><video {0} src=\"{2}\" type=\"video/{1}\" width=100%\\>\n </td></tr></table></center>\"\"\".format(opts, vext[1:], os.path.join(figpath, name + vext))\n html = HTML(s)\n html.reload()\n display(html)", "def embed():", "def video():\n return render_template('video.html')", "def continue_video(self):\n\n print(\"continue_video needs implementation\")", "def play_random_video(self):\n\n print(\"play_random_video needs implementation\")", "def mediaplayer(src,width=400,height=250):\n return XML('<embed allowfullscreen=\"true\" allowscriptaccess=\"always\" flashvars=\"height=%(height)s&width=%(width)s&file=%(src)s\" height=\"%(height)spx\" src=\"%(url)s\" width=\"%(width)spx\"></embed>'%dict(url=URL('static','plugin_wiki/mediaplayer.swf'),src=src,width=width,height=height))", "async def video(self, ctx, *, arg: str):\n await 
ctx.send(site + self.extraire(search + self.traduire(arg.split(' ')), watch_))", "async def igvideo(self, ctx, url):\n response = requests.get(url.replace(\"`\", \"\"), headers={\"Accept-Encoding\": \"utf-8\"})\n tree = html.fromstring(response.content)\n results = tree.xpath('//meta[@content]')\n sources = []\n for result in results:\n try:\n if result.attrib['property'] == \"og:video\":\n sources.append(result.attrib['content'])\n except KeyError:\n pass\n if sources:\n await ctx.send(sources[0])\n self.logger.info(misolog.format_log(ctx, f\"Success\"))\n else:\n await ctx.send(\"Found nothing, sorry!\")\n self.logger.warning(misolog.format_log(ctx, f\"Found nothing\"))", "def get_embed_url(self):\n if not self.original_url:\n return ''\n \n return 'https://vine.co/v/%s/embed/simple' % (self.get_video_id())", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n return 'https://player.vimeo.com/video/%s' % self.get_video_id()", "def setup_embedder_video(site):\n folder = site['institucional']['videos']\n videos = [\n {'id': 'municipio-brasil', 'img': 'capa-video1.jpg', 'text': VIDEO_TEXT},\n {'id': 'por-que-utilizar-o-portal-modelo', 'img': 'capa-video2.jpg'}\n ]\n\n for v in videos:\n embedder = folder[v['id']]\n if v.get('text'):\n embedder.text = v['text']\n path = os.path.dirname(__file__)\n data = open(os.path.join(path, 'browser/static', v['img'])).read()\n image = NamedBlobImage(data, 'image/jpeg', u'hqdefault.jpg')\n embedder.image = image\n embedder.reindexObject()\n logger.debug(u'Video embedder {0} configurado'.format(v['id']))", "def post_video(self, comment):\n\t\tpass", "def video2():\n return Response(gen_frames(2),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def bb_gvideo(hit):\n video = hit.group(1)\n return '<object width=\"400\" height=\"326\"><param name=\"movie\" value=\"http://video.google.com/googleplayer.swf?docId=%s\"></param><param name=\"wmode\" value=\"transparent\"></param><embed src=\"http://video.google.com/googleplayer.swf?docId=%s\" wmode=\"transparent\" style=\"width:400px; height:326px;\" id=\"VideoPlayback\" type=\"application/x-shockwave-flash\" flashvars=\"\"></embed></object>' % ( video, video )", "def get_embed_url(self):\n if not self.id_video or not self.original_url or not self.xml_response:\n return ''\n return '//view.vzaar.com/{0}/player'.format(self.id_video)", "def video1():\n return Response(gen_frames(1),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feplay(title, hash):\n return '' + title + hash", "def video():\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def allow_video(self, video_id):\n print(\"allow_video needs implementation\")", "def get_embed_url(self):\n if not self.get_video_id():\n return ''\n \n return 'https://www.dailymotion.com/embed/video/%s' % self.get_video_id()", "def 
video_feed():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feed():\n\treturn Response(gen(),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame')", "def video_feed():\n return flask.Response(frame_gen_shm(), mimetype='multipart/x-mixed-replace; boundary=frame')" ]
[ "0.67641854", "0.67451173", "0.65865034", "0.6524482", "0.64924055", "0.6408578", "0.6248291", "0.624474", "0.62401366", "0.62132764", "0.62128735", "0.62014794", "0.6188429", "0.6185427", "0.6143705", "0.61265326", "0.61246544", "0.6088066", "0.6083399", "0.6083399", "0.6083399", "0.6083399", "0.60486954", "0.60486954", "0.60486954", "0.60486954", "0.60432214", "0.6042107", "0.60157907", "0.6014354" ]
0.70237356
0
plus should be attached to minus.user not to moderator
def test_moderator_uploads_plusrecord(self): self.go200('minus_plus_upload_user',[self.user.id]) self.formfile('minus_plus_upload', 'file', AUDIO_FILE) self.submit200() self.logout('auth_logout') self.login('u', 'p', url='auth_login', formid='id_login') self.go200('minus_upload') self.showforms() self.formfile('minus_upload', 'file', AUDIO_FILE) self.submit200() plus = MinusPlusRecord.objects.all()[0] minus = MinusRecord.objects.all()[0] self.assert_equal(plus.user, self.user) self.assert_equal(minus.user, self.user) self.assert_equal(plus.minus, minus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_add_permission(self, request):\n return request.user.is_superuser or super().has_add_permission(request)", "def allowed(self, user, amount):\n return True", "def isUMinus(self):\n return _libsbml.ASTNode_isUMinus(self)", "def addme(update: 'Update', context: 'CallbackContext'):\n user_id = update.effective_user.id\n chat_id = update.effective_chat.id\n chats = get_chat_ids(DB)\n\n if chat_id not in chats:\n update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')\n else:\n if add_member_id(DB, user_id): \n update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.')\n else:\n update.message.reply_text('You are already on the whitelist.')", "def add(self, user: U) -> None:\n ...", "def can_approve(self, user, **data):\n raise Return(False)", "def SetHasPlus(self, has=True):\r\n\r\n self._hasPlus = has", "def test_plusrecords_on_edit(self):\n self.go200('minus_plus_upload')\n self.formfile('minus_plus_upload', 'file', AUDIO_FILE)\n self.submit200()\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.submit200()\n self.go200('minus_plus_upload')\n self.formfile('minus_plus_upload', 'file', AUDIO_FILE)\n self.submit200()\n self.assert_equal(MinusPlusRecord.objects.count(), 2)\n self.go200('minus_edit', [self.superuser, 1])\n self.fv('minus_upload', 'author', \"brams\")\n self.submit200()\n self.find('Плюс')\n self.assert_equal(MinusPlusRecord.objects.count(), 1)", "def add_expertise(self, user, score, is_vote=False):\r\n if user==None or user.is_anonymous():\r\n return\r\n \r\n # If user already has expertise on that message\r\n if self.is_expert(user):\r\n expert = self.expert_set.filter(user=user)[0]\r\n expert.score += score\r\n if is_vote:\r\n expert.voted = True\r\n expert.save()\r\n else:\r\n expert = Expert(message=self, user=user, score=score+1., voted=is_vote)\r\n expert.save()\r\n self.expert_set.add(expert)\r\n \r\n # Adds fraction to parent, if score still high enough\r\n if score >= OI_SCORE_ANONYMOUS:\r\n if self.parent:\r\n self.parent.add_expertise(user,score*OI_SCORE_FRACTION_TO_PARENT)", "def test_change_user(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n \n self.config(\"readonly_controls_writeable\", 1)\n self.fv('minus_upload', 'user', '2')\n self.submit200()\n self.config(\"readonly_controls_writeable\", 0)\n minus = MinusRecord.objects.all()[0]\n self.url('minus_detail', [minus.author, minus.id])\n self.assert_equal(minus.user, self.superuser)", "def test_011_add_same_user(self):\n testflow.step(ADD_USR_MSG, TEST_USER1)\n assert not USER_CLI.run('add', TEST_USER1)[0]", "def grant_deny_access(self, bot, update):\n text = update.callback_query.data.split(\" \")\n command = text[0]\n user_lst = text[1:]\n # costruisce il dizionario dal messaggio\n user = {\"id\": user_lst[0], \"username\": \" \".join(user_lst[1:])}\n if (command.strip(\n \"/\") == \"consentiAccessoSi\"): # se viene garantito l'accesso salva l'user nel db e notifa user e developer\n\n if DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user['id'],)):\n for msg in developer_message:\n bot.edit_message_text(\n chat_id=msg.chat_id,\n text=\"Lo user : \" + str(user[\"username\"]) + \", è gia presente nel db\",\n message_id=msg.message_id,\n parse_mode=\"HTML\"\n )\n return\n\n # print(\"Accesso garantito\")\n self.add_new_user(user)\n bot.send_message(user[\"id\"], \"Ti è stato garantito l'accesso al bot!\")\n\n for msg in 
developer_message:\n bot.edit_message_text(\n chat_id=msg.chat_id,\n text=\"L'accesso a user : \" + str(user[\"username\"]) + \", è stato garantito\",\n message_id=msg.message_id,\n parse_mode=\"HTML\"\n )\n\n else: # altrimenti aggiungi l'user alla lista bannati e notifica i developers\n # print(\"Accesso negato\")\n bot.send_message(user[\"id\"], \"Non ti è stato garantito l'accesso al bot :(\")\n self.ban_user(user)\n for msg in developer_message:\n bot.edit_message_text(\n chat_id=msg.chat_id,\n text=\"L'accesso a user : \" + str(user[\"username\"]) + \", è stato negato\",\n message_id=msg.message_id,\n parse_mode=\"HTML\"\n )\n\n developer_message.clear()", "async def test_regular_member_cannot_target_another_member(self, constants):\n constants.MODERATION_ROLES = [self.moderator_role.id]\n ctx = helpers.MockContext(author=self.author)\n\n await self.cog.user_info(self.cog, ctx, self.target)\n\n ctx.send.assert_called_once_with(\"You may not use this command on users other than yourself.\")", "def response_add(self, request, obj, post_url_continue=None):\n # We should allow further modification of the user just added i.e. the\n # 'Save' button should behave like the 'Save and continue editing'\n # button except in two scenarios:\n # * The user has pressed the 'Save and add another' button\n # * We are adding a user in a popup\n if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:\n request.POST['_continue'] = 1\n return super(UserAdmin, self).response_add(request, obj,\n post_url_continue)", "def allow_to_edit(user):\n return allow_to_edit_well(user)", "def __neg__(self):\n return UnaryMinus(self)", "def has_add_permission(self, request):\r\n return False", "def has_add_permission(self, request):\r\n return False", "def can_assign(userid, group):", "def test_user_without_share(self):\n set_permission(Permission.EDIT, self.user1, self.collection)\n\n # Can not add permissions to users.\n data = {\"users\": {self.user2.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)\n\n # Can not add permissions to groups.\n data = {\"users\": {self.group.pk: \"view\"}}\n resp = self._detail_permissions(self.collection.pk, data, self.user1)\n self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)", "def test_user_is_global_moderator(self):\n thread = self.create_thread()\n user = self.create_user()\n message = thread.first_message\n message.status = 'pending'\n message.save()\n\n self.assertFalse(message.visible_to_user(user))\n\n self.add_perm(user, 'can_moderate_all_messages', 'accounts', 'user')\n\n # To get rid of the user permission cache we should re-grab our user\n latest_user = USER_MODEL.objects.get(pk=user.pk)\n self.assertTrue(message.visible_to_user(latest_user))", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def user_added_credit(self):\n return (self.user.Credit > 0)", "def has_add_permission(self, request):\n if is_seller(request.user):\n return True\n return False", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def user_can_edit(self, user):\n return user == self.owner", "async def rep_user(self, ctx, *, user: discord.Member = None):\n if user and user.bot:\n return await ctx.send_line(\"😔 Sorry but I just can't do that.\")\n if user and user.id == ctx.author.id:\n return await ctx.send_line(\"🙂 
Nice try but wouldn't that be unfair?\")\n author_profile = await self.cache.get_profile(ctx.author.id)\n if user is None:\n if author_profile.can_rep:\n res = \"👌 You can rep someone now.\"\n else:\n res = f\"⏳ You can rep again {author_profile.next_rep.humanize()}.\"\n return await ctx.send_line(res)\n\n if author_profile.can_rep:\n target_profile = await self.cache.get_profile(user.id)\n if not target_profile:\n res = self.plugin.data.responses.no_profile.format(user_name=user.name)\n return await ctx.send_line(res)\n await target_profile.rep(author_profile)\n res = f\"You added one reputation point to {user.name}.\"\n await ctx.send_line(res, ctx.author.avatar_url)\n else:\n res = f\"⏳ You can rep again {author_profile.next_rep.humanize()}.\"\n await ctx.send_line(res)", "def is_polyphony_user(allow_mods: bool = False):\n # TODO: Add error message that self deletes\n async def predicate(ctx: commands.context):\n user = get_user(ctx.author.id)\n is_mod = False\n if allow_mods:\n is_mod = any(\n role.name in MODERATOR_ROLES for role in ctx.message.author.roles\n )\n if is_mod or user is not None:\n return True\n else:\n await ctx.send(\n f\"Sorry {ctx.message.author.mention}. You are not a Polyphony user. Contact a moderator if you believe this is a mistake.\",\n delete_after=10,\n )\n return False\n\n return commands.check(predicate)", "def has_add_permission(self, request):\n return False", "def has_add_permission(self, request):\n return False" ]
[ "0.575054", "0.57219326", "0.5711969", "0.56312156", "0.54943043", "0.547881", "0.5472063", "0.544157", "0.5420743", "0.53919333", "0.5384767", "0.53588045", "0.53424716", "0.5340141", "0.5331763", "0.5317873", "0.5317257", "0.5317257", "0.53038454", "0.53020555", "0.52879685", "0.52818656", "0.5266991", "0.52563345", "0.52561367", "0.52483124", "0.52418137", "0.52223265", "0.52071756", "0.52071756" ]
0.581246
0
Test case for peers_get
def test_peers_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_peers_peerid_get(self):\n pass", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}])", "def test_peers_post(self):\n pass", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}, \\\n\t\t\t\t{'ip': '100.100.100.100', \\\n\t\t\t\t\t'peer id': 'test2', 'port': 1000}])", "def test_peers_peerid_post(self):\n pass", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "def test_peers_peerid_delete(self):\n pass", "def getPeers(self, peerType):\r\n raise NotImplementedError()", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8dddd\\x03\\xe8\")", "def test_get_peer_finished(self):\n server, client = loopback()\n\n assert server.get_peer_finished() is not None\n assert len(server.get_peer_finished()) > 0", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8\")", "def test_empty_peer(self):\n\n\t\tself.n = tracker.make_peer_list([])\n\t\tself.assertEqual(self.n, [])", "def peers():\n return flask.jsonify(api_utils.get_peer_conf_and_state())", "async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}", "def get_peers(self):\n self.peers = []\n retriever_methods = [\n m\n for m in rtorrent9.peer.methods\n if m.is_retriever() and m.is_available(self._rt_obj)\n ]\n # need to leave 2nd arg empty (dunno why)\n m = rtorrent9.rpc.Multicall(self)\n m.add(\n \"p.multicall\",\n self.info_hash,\n \"\",\n *[method.rpc_call + \"=\" for method in retriever_methods]\n )\n\n results = m.call()[0] # only sent one call, only need first result\n\n for result in results:\n results_dict = {}\n # build results_dict\n for m, r in zip(retriever_methods, result):\n results_dict[m.varname] = rtorrent9.rpc.process_result(m, r)\n\n self.peers.append(Peer(self._rt_obj, self.info_hash, **results_dict))\n\n return self.peers", "def test_invalid_same_peer_id2(self):\n # Disable idle timeout before creating any new peer because self.create_peer(...)\n # runs the main loop.\n self.conn.disable_idle_timeout()\n # Create new peer and disable idle timeout.\n manager3 = self.create_peer(self.network, peer_id=self.peer_id2)\n conn = FakeConnection(manager3, self.manager1)\n # Disable idle timeout.\n conn.disable_idle_timeout()\n # HELLO\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'HELLO')\n self.conn.run_one_step()\n conn.run_one_step()\n # PEER-ID\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'PEER-ID')\n 
self.assertEqual(self.conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.conn.run_one_step()\n conn.run_one_step()\n # READY\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'READY')\n self.conn.run_one_step()\n conn.run_one_step()\n # continue until messages stop\n self.conn.run_until_empty()\n conn.run_until_empty()\n self.run_to_completion()\n # one of the peers will close the connection. We don't know which one, as it depends\n # on the peer ids\n\n if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting:\n conn_dead = self.conn\n conn_alive = conn\n elif conn.tr1.disconnecting or conn.tr2.disconnecting:\n conn_dead = conn\n conn_alive = self.conn\n else:\n raise Exception('It should never happen.')\n self._check_result_only_cmd(conn_dead.peek_tr1_value() + conn_dead.peek_tr2_value(), b'ERROR')\n # at this point, the connection must be closing as the error was detected on READY state\n self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])\n # check connected_peers\n connected_peers = list(self.manager1.connections.connected_peers.values())\n self.assertEquals(1, len(connected_peers))\n self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])\n # connection is still up\n self.assertIsConnected(conn_alive)", "def test_combine_peer_stats(self):\n tracts = Geo.objects.filter(geo_type=Geo.TRACT_TYPE, cbsa=request.GET.get('metro'))\n metro = Geo.objects.get(geo_type=Geo.METRO_TYPE, geoid=request.GET.get('metro'))\n lender = Institution.objects.get(institution_id=request.GET.get('lender'))\n peers = lender.get_peer_list(metro, None, None)\n peer_data_collector = []\n for peer in peers:\n peer_request = HttpRequest()\n peer_request.GET['lender'] = peer.institution.institution_id\n peer_request.GET['metro']= metro.geoid\n peer_lar_data = loan_originations_as_json(peer_request)\n peer_data_collector.append(assemble_stats(peer_lar_data, tracts))\n peer_stats = combine_peer_stats(peer_data_collector)\n self.assertEqual(peer_stats['hma_pct'], 0.0)\n self.assertEqual(peer_stats['lma_pct'], 1.0)\n self.assertEqual(peer_stats['mma_pct'], 0.0)\n self.assertEqual(peer_stats['lma'], 7)\n self.assertEqual(peer_stats['mma'], 0)\n self.assertEqual(peer_stats['hma'], 0)\n self.assertEqual(peer_stats['lar_total'], 7)", "def getpeers_command(chat, message, args):\n get_nodes = os.popen(path_to_bin + \"/bitcanna-cli getpeerinfo\").read()\n loaded_json = json.loads(get_nodes)\n msg = \"\"\n count = 0\n file_peers = os.path.join(path_to_bin + '/peers.txt')\n chat.send (\"Building a list...\") \n print (\"List of online NODES\")\n print (\"==========================\")\n for tx in loaded_json:\n msg = msg + \"IP: \" + tx[\"addr\"] + \", version: \" + tx[\"subver\"] + \"\\n\"\n count = count + 1 \n print (msg + \"\\nTotal: \" + str(count))\n with open(file_peers, 'w') as f:\n f.write(msg+ \"\\nTotal: \" + str(count))\n chat.send_file(path=file_peers, caption='This file contains all peers connected to your masternode/fullnode')", "def discover_peers():\n # TODO: Disable this function if peer discoverability is disabled in config\n\n peer_manager = load_plugin(\"chain.plugins.peers\")\n peers = peer_manager.peers()\n # Shuffle peers so we 
always get the peers from the different peers at the start\n random.shuffle(peers)\n for index, peer in enumerate(peers):\n his_peers = peer.fetch_peers()\n for his_peer in his_peers:\n add_peer(\n ip=his_peer.ip,\n port=his_peer.port,\n chain_version=his_peer.chain_version,\n nethash=his_peer.nethash,\n os=his_peer.os,\n )\n\n # Always get peers from at least 4 sources. As add_peer is async,\n # `has_minimum_peers` might actually return wrong result, but that will only\n # increase the number of peers we have.\n if index >= 4 and peer_manager.has_minimum_peers():\n break\n\n reverify_all_peers()", "def test_users_getting_add_peer_event(self) -> None:\n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n with self.capture_send_event_calls(expected_num_events=5) as events:\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.assertEqual(add_peer_event[\"event\"][\"op\"], \"peer_add\")\n event_sent_to_ids = add_peer_event[\"users\"]\n for user_id in new_user_ids_to_subscribe:\n # Make sure new users subscribed to stream is not in\n # peer_add event recipient list\n self.assertNotIn(user_id, event_sent_to_ids)\n for old_user in orig_user_ids_to_subscribe:\n # Check non-new users are in peer_add event recipient list.\n self.assertIn(old_user, event_sent_to_ids)", "def _try_peers(self, peers):\n for peer_entry in peers:\n if peer_entry['id'] == self.peer_id:\n continue\n\n print('Trying peer: {}'.format(peer_entry))\n peer = Peer(peer_entry['id'], peer_entry['ip'], peer_entry['port'], self._torrent)\n try:\n peer.connect(self.peer_id)\n except PeerConnectionError:\n continue\n else:\n self._peers.append(peer)\n peer.subscribe_for_messages_to_client(self.peer_message_receiver(peer))", "def peer_list_all(self):\n return self.client.call('GET', self.name + 'peer-list/all')", "def test_candidates_retrieve(self):\n pass", "def test_empty_peer(self):\n\n\t\tself.n = tracker.make_compact_peer_list([])\n\t\tself.assertEqual(self.n, \"\")", "def peer_list_reachable(self):\n return self.client.call('GET', self.name + 'peer-list/reachable')", "def test_wallets_get(self):\n pass", "def getConnectedPeers(self, peerType):\r\n raise NotImplementedError()", "def test_users_getting_remove_peer_event(self) -> None:\n user1 = self.example_user(\"othello\")\n user2 = self.example_user(\"cordelia\")\n user3 = self.example_user(\"hamlet\")\n user4 = self.example_user(\"iago\")\n user5 = self.example_user(\"AARON\")\n guest = self.example_user(\"polonius\")\n\n realm = user1.realm\n\n stream1 = self.make_stream(\"stream1\")\n stream2 = self.make_stream(\"stream2\")\n stream3 = self.make_stream(\"stream3\")\n private = self.make_stream(\"private_stream\", invite_only=True)\n\n self.subscribe(user1, \"stream1\")\n self.subscribe(user2, \"stream1\")\n self.subscribe(user3, \"stream1\")\n\n self.subscribe(user2, \"stream2\")\n 
self.subscribe(user2, \"stream3\")\n\n self.subscribe(user1, \"private_stream\")\n self.subscribe(user2, \"private_stream\")\n self.subscribe(user3, \"private_stream\")\n\n # Sends 3 peer-remove events and 2 unsubscribe events.\n with self.assert_database_query_count(16):\n with self.assert_memcached_count(3):\n with self.capture_send_event_calls(expected_num_events=5) as events:\n bulk_remove_subscriptions(\n realm,\n [user1, user2],\n [stream1, stream2, stream3, private],\n acting_user=None,\n )\n\n peer_events = [e for e in events if e[\"event\"].get(\"op\") == \"peer_remove\"]\n\n # We only care about a subset of users when we inspect\n # peer_remove events.\n our_user_ids = {\n user1.id,\n user2.id,\n user3.id,\n user4.id,\n user5.id,\n guest.id,\n }\n\n notifications = []\n for event in peer_events:\n stream_ids = event[\"event\"][\"stream_ids\"]\n stream_names = sorted(Stream.objects.get(id=stream_id).name for stream_id in stream_ids)\n removed_user_ids = set(event[\"event\"][\"user_ids\"])\n notified_user_ids = set(event[\"users\"]) & our_user_ids\n notifications.append((\",\".join(stream_names), removed_user_ids, notified_user_ids))\n\n notifications.sort(key=lambda tup: tup[0])\n\n self.assertEqual(\n notifications,\n [\n (\"private_stream\", {user1.id, user2.id}, {user3.id, user4.id}),\n (\"stream1\", {user1.id, user2.id}, {user3.id, user4.id, user5.id}),\n (\"stream2,stream3\", {user2.id}, {user1.id, user3.id, user4.id, user5.id}),\n ],\n )", "def test_get_network(self):\n pass", "def test_nonexistant_peer_status(self):\n status = self.pybird.get_peer_status(\"HAMSTER\")\n self.assertEquals(status, None)" ]
[ "0.8696221", "0.7192812", "0.711802", "0.70360357", "0.70274997", "0.6773914", "0.6765761", "0.67139876", "0.6710879", "0.66730374", "0.65417737", "0.6500344", "0.6307875", "0.62696064", "0.62148637", "0.61876035", "0.61713827", "0.61438066", "0.61334497", "0.6088506", "0.60723984", "0.6059658", "0.6018746", "0.6017201", "0.5960455", "0.59550405", "0.58841354", "0.5854949", "0.5852464", "0.58014554" ]
0.8996001
0
Test case for peers_peerid_delete
def test_peers_peerid_delete(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_peers_peerid_get(self):\n pass", "def test_peers_peerid_post(self):\n pass", "def test_bgp_peer_remove(self, m_client):\n # Set up arguments\n address = '1.2.3.4'\n\n # Call method under test\n bgp_peer_remove(address, 4)\n\n # Assert\n m_client.remove_bgp_peer.assert_called_once_with(4, IPAddress(address))", "def test_meme_meme_id_delete(self):\n pass", "def delete_bgp_peer(virtualInterfaceId=None, asn=None, customerAddress=None):\n pass", "def test_user_id_delete(self):\n pass", "def test_client_address_delete(self):\n pass", "def test_delete_team_member(self):\n pass", "def remove_peer(self, peer_id):\n del self.peers[peer_id]", "def test_users_getting_remove_peer_event(self) -> None:\n user1 = self.example_user(\"othello\")\n user2 = self.example_user(\"cordelia\")\n user3 = self.example_user(\"hamlet\")\n user4 = self.example_user(\"iago\")\n user5 = self.example_user(\"AARON\")\n guest = self.example_user(\"polonius\")\n\n realm = user1.realm\n\n stream1 = self.make_stream(\"stream1\")\n stream2 = self.make_stream(\"stream2\")\n stream3 = self.make_stream(\"stream3\")\n private = self.make_stream(\"private_stream\", invite_only=True)\n\n self.subscribe(user1, \"stream1\")\n self.subscribe(user2, \"stream1\")\n self.subscribe(user3, \"stream1\")\n\n self.subscribe(user2, \"stream2\")\n self.subscribe(user2, \"stream3\")\n\n self.subscribe(user1, \"private_stream\")\n self.subscribe(user2, \"private_stream\")\n self.subscribe(user3, \"private_stream\")\n\n # Sends 3 peer-remove events and 2 unsubscribe events.\n with self.assert_database_query_count(16):\n with self.assert_memcached_count(3):\n with self.capture_send_event_calls(expected_num_events=5) as events:\n bulk_remove_subscriptions(\n realm,\n [user1, user2],\n [stream1, stream2, stream3, private],\n acting_user=None,\n )\n\n peer_events = [e for e in events if e[\"event\"].get(\"op\") == \"peer_remove\"]\n\n # We only care about a subset of users when we inspect\n # peer_remove events.\n our_user_ids = {\n user1.id,\n user2.id,\n user3.id,\n user4.id,\n user5.id,\n guest.id,\n }\n\n notifications = []\n for event in peer_events:\n stream_ids = event[\"event\"][\"stream_ids\"]\n stream_names = sorted(Stream.objects.get(id=stream_id).name for stream_id in stream_ids)\n removed_user_ids = set(event[\"event\"][\"user_ids\"])\n notified_user_ids = set(event[\"users\"]) & our_user_ids\n notifications.append((\",\".join(stream_names), removed_user_ids, notified_user_ids))\n\n notifications.sort(key=lambda tup: tup[0])\n\n self.assertEqual(\n notifications,\n [\n (\"private_stream\", {user1.id, user2.id}, {user3.id, user4.id}),\n (\"stream1\", {user1.id, user2.id}, {user3.id, user4.id, user5.id}),\n (\"stream2,stream3\", {user2.id}, {user1.id, user3.id, user4.id, user5.id}),\n ],\n )", "def test_delete_client(self):\n pass", "def test_peers_get(self):\n pass", "def test_delete_device_by_id(self):\n pass", "def test_delete_device_group_member_by_id(self):\n pass", "def test_delete_identity(self):\n pass", "def test_coupledmodels_id_delete(self):\n pass", "def test_delete_device_group_member_by_id1(self):\n pass", "def test_delete_network(self):\n pass", "def test_delete_alert_by_id(self):\n pass", "def test_delete_message(client, test_db):\n rv = client.get(\"/delete/1\")\n data = json.loads(rv.data)\n assert data[\"status\"] == 0\n login(client, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n rv = client.get(\"/delete/1\")\n data = json.loads(rv.data)\n assert data[\"status\"] == 1", "def 
test_candidate_deletion(self):\r\n self.register_user()\r\n result = self.login_user()\r\n access_token = json.loads(result.data.decode())['access_token']\r\n\r\n rv = self.client().post('/candidate',headers=dict(Authorization=access_token),data=self.candidate)\r\n self.assertEqual(rv.status_code, 201)\r\n results = json.loads(rv.data.decode())\r\n\r\n # delete the candidate we just created\r\n res = self.client().delete('/candidate',headers=dict(Authorization=access_token),data={'enrolement_no':results['enrolement_no']})\r\n self.assertEqual(res.status_code, 200)\r\n\r\n #Test to see if it exists, should return a 404\r\n result = self.client().get('/candidate',headers=dict(Authorization=access_token), data={'enrolement_no':results['enrolement_no']})\r\n self.assertEqual(result.status_code, 404)", "def test_delete_device_by_id1(self):\n pass", "def delete(self, _id):", "def drop_peer(self, peer_id: str) -> None:\n\n session = self.session()\n try:\n session\\\n .query(ResourceTable)\\\n .filter(ResourceTable.peerId == peer_id)\\\n .delete()\n session.commit()\n\n finally:\n session.close()", "def test_peers_post(self):\n pass", "def test_delete_email_address(self):\n email_addr = 'delete@' + self.email_dom\n addr = SpokeEmailAddress(self.org_name, self.user_id)\n addr.create(email_addr)\n self.assertTrue(addr.delete(email_addr))", "def delete():", "def test_delete(self):\n pass", "def test_delete_device(self):\n pass", "def test_delete_device(self):\n pass" ]
[ "0.7332794", "0.70445776", "0.6931957", "0.6640967", "0.6611744", "0.6462411", "0.6403419", "0.6368902", "0.6346265", "0.6344083", "0.6313252", "0.62687737", "0.6234457", "0.6207562", "0.6190389", "0.6161354", "0.6135893", "0.61328804", "0.6119607", "0.6095823", "0.60923946", "0.6090184", "0.6018817", "0.60074943", "0.6000557", "0.59921277", "0.59849674", "0.5984401", "0.5982424", "0.5982424" ]
0.94635904
0
Test case for peers_peerid_get
def test_peers_peerid_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_peers_get(self):\n pass", "def test_peers_peerid_post(self):\n pass", "def test_peers_peerid_delete(self):\n pass", "def _LookupPeer(self, peer_id):\n key = self._GetServerKey(peer_id)\n values, placemark = self._dht.Get(key)\n if not values:\n raise NessieError('No peers returned for user id %r.' % peer_id)\n # NOTE(damonkohler): Need to accomodate for the possibility of multipe\n # values.\n value = self._Decrypt(values[0])\n host, port = value.split(':')\n port = int(port)\n return host, port", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}])", "def test_invalid_same_peer_id2(self):\n # Disable idle timeout before creating any new peer because self.create_peer(...)\n # runs the main loop.\n self.conn.disable_idle_timeout()\n # Create new peer and disable idle timeout.\n manager3 = self.create_peer(self.network, peer_id=self.peer_id2)\n conn = FakeConnection(manager3, self.manager1)\n # Disable idle timeout.\n conn.disable_idle_timeout()\n # HELLO\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'HELLO')\n self.conn.run_one_step()\n conn.run_one_step()\n # PEER-ID\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.conn.run_one_step()\n conn.run_one_step()\n # READY\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'READY')\n self.conn.run_one_step()\n conn.run_one_step()\n # continue until messages stop\n self.conn.run_until_empty()\n conn.run_until_empty()\n self.run_to_completion()\n # one of the peers will close the connection. 
We don't know which one, as it depends\n # on the peer ids\n\n if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting:\n conn_dead = self.conn\n conn_alive = conn\n elif conn.tr1.disconnecting or conn.tr2.disconnecting:\n conn_dead = conn\n conn_alive = self.conn\n else:\n raise Exception('It should never happen.')\n self._check_result_only_cmd(conn_dead.peek_tr1_value() + conn_dead.peek_tr2_value(), b'ERROR')\n # at this point, the connection must be closing as the error was detected on READY state\n self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])\n # check connected_peers\n connected_peers = list(self.manager1.connections.connected_peers.values())\n self.assertEquals(1, len(connected_peers))\n self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])\n # connection is still up\n self.assertIsConnected(conn_alive)", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}, \\\n\t\t\t\t{'ip': '100.100.100.100', \\\n\t\t\t\t\t'peer id': 'test2', 'port': 1000}])", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8\")", "def test_prefectures_id_get(self):\n pass", "def test_gridironfootballplayers_id_get(self):\n pass", "def test_meme_meme_id_get(self):\n pass", "def test_unique_peer(self):\n\n\t\tself.db = {}\n\t\ttracker.add_peer(self.db, \\\n\t\t\t\"test_hash\", \"test\", \"100.100.100.100\", 1000)\n\t\tself.assertEqual(self.db, \\\n\t\t\t{'test_hash': [('test', '100.100.100.100', 1000)]})", "def test_get_chain_by_id(self):\n pass", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8dddd\\x03\\xe8\")", "def test_get_peer_finished(self):\n server, client = loopback()\n\n assert server.get_peer_finished() is not None\n assert len(server.get_peer_finished()) > 0", "def test_peers_post(self):\n pass", "def test_intercommunalitys_id_get(self):\n pass", "async def resolve_peer(\n self,\n peer_id: Union[int, str]\n ) -> Union[raw.base.InputPeer, raw.base.InputUser, raw.base.InputChannel]:\n if not self.is_connected:\n raise ConnectionError(\"Client has not been started yet\")\n\n try:\n return await self.storage.get_peer_by_id(peer_id)\n except KeyError:\n if isinstance(peer_id, str):\n if peer_id in (\"self\", \"me\"):\n return raw.types.InputPeerSelf()\n\n peer_id = re.sub(r\"[@+\\s]\", \"\", peer_id.lower())\n\n try:\n int(peer_id)\n except ValueError:\n try:\n return await self.storage.get_peer_by_username(peer_id)\n except KeyError:\n await self.send(\n raw.functions.contacts.ResolveUsername(\n username=peer_id\n )\n )\n\n return await self.storage.get_peer_by_username(peer_id)\n else:\n try:\n return await self.storage.get_peer_by_phone_number(peer_id)\n except KeyError:\n raise PeerIdInvalid\n\n peer_type = utils.get_peer_type(peer_id)\n\n if peer_type == \"user\":\n await self.fetch_peers(\n await self.send(\n raw.functions.users.GetUsers(\n id=[\n raw.types.InputUser(\n user_id=peer_id,\n access_hash=0\n )\n ]\n )\n )\n )\n elif peer_type == \"chat\":\n await self.send(\n 
raw.functions.messages.GetChats(\n id=[-peer_id]\n )\n )\n else:\n await self.send(\n raw.functions.channels.GetChannels(\n id=[\n raw.types.InputChannel(\n channel_id=utils.get_channel_id(peer_id),\n access_hash=0\n )\n ]\n )\n )\n\n try:\n return await self.storage.get_peer_by_id(peer_id)\n except KeyError:\n raise PeerIdInvalid", "def test_cyclingleagues_id_get(self):\n pass", "def test_plays_id_get(self):\n pass", "def get_members_ids(\n self,\n peer_id: int\n ) -> List[int]:\n members = self.get_members(peer_id)\n if \"error\" in members:\n return None\n return [m[\"member_id\"]\n for m in members[\"response\"][\"items\"] if m[\"member_id\"] > 0]", "def getPeer(self):\n return \"Peer:PID:\" + str(self.transport.pid)", "def test_users_getting_add_peer_event(self) -> None:\n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n with self.capture_send_event_calls(expected_num_events=5) as events:\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.assertEqual(add_peer_event[\"event\"][\"op\"], \"peer_add\")\n event_sent_to_ids = add_peer_event[\"users\"]\n for user_id in new_user_ids_to_subscribe:\n # Make sure new users subscribed to stream is not in\n # peer_add event recipient list\n self.assertNotIn(user_id, event_sent_to_ids)\n for old_user in orig_user_ids_to_subscribe:\n # Check non-new users are in peer_add event recipient list.\n self.assertIn(old_user, event_sent_to_ids)", "def test_user_id_get(self):\n pass", "def test_nonexistant_peer_status(self):\n status = self.pybird.get_peer_status(\"HAMSTER\")\n self.assertEquals(status, None)", "def test_get_signatures_by_participant_id(self):\n response = self.client.open(\n \"/api/signatures/participantId/{participantId}\".format(participantId=789),\n method=\"GET\",\n )\n self.assert200(response, \"Response body is : \" + response.data.decode(\"utf-8\"))", "async def peers() -> dict:\n ips = [peer.ip for peer in chain.peers]\n return {\"peers\": ips}", "def test_solareclipses_id_get(self):\n pass", "def honeypot_get_peering(self,honeypotids):\n req = {\"type\":\"get_peering\",\n \"from\":self.network.mc_id,\n \"to\":honeypotids}\n expect_dict = {\"type\":\"send_peering\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = [msg[\"ip\"],msg[\"port\"]]\n return answer", "def peer(node_index):\n node = Node.from_index(node_index)\n pub_key = get(node, 'getinfo')['identity_pubkey']\n address = f'{pub_key}@localhost:{node.port}'\n click.echo(click.style(address, fg='green'))" ]
[ "0.76344407", "0.74507415", "0.73697764", "0.66594434", "0.65899813", "0.62889266", "0.6225391", "0.6156838", "0.61484265", "0.6077912", "0.60704", "0.5968563", "0.595071", "0.5886904", "0.587434", "0.5861096", "0.58559394", "0.5853292", "0.58342946", "0.58150953", "0.58023834", "0.568051", "0.5666957", "0.5652124", "0.5632004", "0.5602319", "0.5575734", "0.55505264", "0.5547432", "0.5533856" ]
0.93089193
0
Test case for peers_peerid_post
def test_peers_peerid_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_peers_post(self):\n pass", "def test_peers_peerid_get(self):\n pass", "def test_peers_peerid_delete(self):\n pass", "def test_peers_get(self):\n pass", "def test_duplicate_peer(self):\n\n\t\tself.db = {'test_hash': [('test', '100.100.100.100', 1000)]}\n\t\ttracker.add_peer(self.db, \\\n\t\t\t\"test_hash\", \"test\", \"100.100.100.100\", 1000)\n\t\tself.assertEqual(self.db, \\\n\t\t\t{'test_hash': [('test', '100.100.100.100', 1000)]})", "def test_unique_peer(self):\n\n\t\tself.db = {}\n\t\ttracker.add_peer(self.db, \\\n\t\t\t\"test_hash\", \"test\", \"100.100.100.100\", 1000)\n\t\tself.assertEqual(self.db, \\\n\t\t\t{'test_hash': [('test', '100.100.100.100', 1000)]})", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}])", "def test_users_getting_add_peer_event(self) -> None:\n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n with self.capture_send_event_calls(expected_num_events=5) as events:\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.assertEqual(add_peer_event[\"event\"][\"op\"], \"peer_add\")\n event_sent_to_ids = add_peer_event[\"users\"]\n for user_id in new_user_ids_to_subscribe:\n # Make sure new users subscribed to stream is not in\n # peer_add event recipient list\n self.assertNotIn(user_id, event_sent_to_ids)\n for old_user in orig_user_ids_to_subscribe:\n # Check non-new users are in peer_add event recipient list.\n self.assertIn(old_user, event_sent_to_ids)", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}, \\\n\t\t\t\t{'ip': '100.100.100.100', \\\n\t\t\t\t\t'peer id': 'test2', 'port': 1000}])", "def test_invalid_same_peer_id2(self):\n # Disable idle timeout before creating any new peer because self.create_peer(...)\n # runs the main loop.\n self.conn.disable_idle_timeout()\n # Create new peer and disable idle timeout.\n manager3 = self.create_peer(self.network, peer_id=self.peer_id2)\n conn = FakeConnection(manager3, self.manager1)\n # Disable idle timeout.\n conn.disable_idle_timeout()\n # HELLO\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'HELLO')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'HELLO')\n self.conn.run_one_step()\n conn.run_one_step()\n # PEER-ID\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'PEER-ID')\n 
self.assertEqual(conn.peek_tr1_value().split()[0], b'PEER-ID')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'PEER-ID')\n self.conn.run_one_step()\n conn.run_one_step()\n # READY\n self.assertEqual(self.conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(self.conn.peek_tr2_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr1_value().split()[0], b'READY')\n self.assertEqual(conn.peek_tr2_value().split()[0], b'READY')\n self.conn.run_one_step()\n conn.run_one_step()\n # continue until messages stop\n self.conn.run_until_empty()\n conn.run_until_empty()\n self.run_to_completion()\n # one of the peers will close the connection. We don't know which one, as it depends\n # on the peer ids\n\n if self.conn.tr1.disconnecting or self.conn.tr2.disconnecting:\n conn_dead = self.conn\n conn_alive = conn\n elif conn.tr1.disconnecting or conn.tr2.disconnecting:\n conn_dead = conn\n conn_alive = self.conn\n else:\n raise Exception('It should never happen.')\n self._check_result_only_cmd(conn_dead.peek_tr1_value() + conn_dead.peek_tr2_value(), b'ERROR')\n # at this point, the connection must be closing as the error was detected on READY state\n self.assertIn(True, [conn_dead.tr1.disconnecting, conn_dead.tr2.disconnecting])\n # check connected_peers\n connected_peers = list(self.manager1.connections.connected_peers.values())\n self.assertEquals(1, len(connected_peers))\n self.assertIn(connected_peers[0], [conn_alive.proto1, conn_alive.proto2])\n # connection is still up\n self.assertIsConnected(conn_alive)", "def test_post_users_post(self):\n pass", "def test_post_user_post(self):\n pass", "def test_meme_post(self):\n pass", "def test_post_chain(self):\n pass", "def test_post_query_reply_offers(self):\n pass", "def test_wallets_post(self):\n pass", "def submit(id, host):", "async def test_create_and_forget_post_on_target(fixture_account):\n _ = await create_and_forget_post(fixture_account, TARGET_NODE, REFERENCE_NODE)", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8\")", "def test_multiple_peers(self):\n\n\t\tself.n = tracker.make_compact_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\"), \\\n\t\t\t\t(\"test2\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8dddd\\x03\\xe8\")", "def test_get_peer_finished(self):\n server, client = loopback()\n\n assert server.get_peer_finished() is not None\n assert len(server.get_peer_finished()) > 0", "def test_post_same_user_twice(self):\n self.client.post(self.url, {\"email\": \"[email protected]\"})\n self.client.post(self.url, {\"email\": \"[email protected]\"})\n self.assertEqual(self.event.participants.count(), 1)", "def test_post(self):\n pass", "def test_resolve_post(client):\n g.test_authorized_for = []\n res = client.post(\"/v0/resolve\", json=post_json_data)\n expected_response = '{\"msg\":\"Thanks for resolving the issue!\",' '\"status\":\"ok\"}'\n assert expected_response in res.data.decode(\"utf-8\")", "async def test_create_and_forget_post_on_reference(fixture_account):\n _ = await create_and_forget_post(fixture_account, REFERENCE_NODE, TARGET_NODE)", "def add_stun_peer(peer_id, pub_ip=\"101.81.15.129\", pub_port=\"36900\", pri_ip=\"192.168.1.2\", pri_port=\"36900\", nat_type=0):\n rc = redis.StrictRedis(host=REDIS_SINGLE_HOST, port=REDIS_PORT, db=0)\n if type(peer_id) != list:\n peer_id = [peer_id]\n\n for pid in peer_id:\n key = 
Template('STUN_$peerid')\n KEY = key.substitute(peerid=pid)\n value = Template(\n \"{\\\"nat_type\\\":$nat_type,\\\"pub_ip\\\":\\\"$pub_ip\\\",\\\"pub_port\\\":$pub_port,\\\"pri_ip\\\":\\\"$pri_ip\\\",\"\n \"\\\"pri_port\\\":$pri_port}\"\n )\n VALUE = value.substitute(nat_type=nat_type, pub_ip=pub_ip, pub_port=pub_port, pri_ip=pri_ip, pri_port=pri_port)\n rc.setex(KEY, 300, VALUE)", "def test_users_post(self):\n pass", "def test_users_post(self):\n pass", "def addPeer(self, peerType, peerId):\r\n raise NotImplementedError()", "def test_receive_telegram_POST(self):\n\n response = self.client.post(\n reverse('transcript:record_telegram'),\n content_type='application/json',\n data='''\n {\n \"update_id\":10000,\n \"message\":{\n \"date\":1441645532,\n \"chat\":{\n \"last_name\":\"Test Lastname\",\n \"id\":1111111,\n \"type\": \"private\",\n \"first_name\":\"Test\",\n \"username\":\"Test\"\n },\n \"message_id\":1365,\n \"from\":{\n \"last_name\":\"Test Lastname\",\n \"id\":1111111,\n \"first_name\":\"Test\",\n \"username\":\"Test\"\n },\n \"text\":\"/start\"\n }\n }''')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.content, b\"OK\")\n self.assertEqual(Message.objects.count(), 1)" ]
[ "0.8070357", "0.7367078", "0.70842206", "0.6161115", "0.60336685", "0.6007113", "0.6005517", "0.5882451", "0.58606833", "0.57756877", "0.56751156", "0.5589279", "0.54988295", "0.5494192", "0.5477512", "0.54616493", "0.54128593", "0.5382333", "0.5368021", "0.5351914", "0.5335653", "0.5276066", "0.5270643", "0.5235551", "0.522549", "0.52040446", "0.5200127", "0.5200127", "0.51256675", "0.5108618" ]
0.918874
0
Test case for peers_post
def test_peers_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_peers_peerid_post(self):\n pass", "def test_post_chain(self):\n pass", "def test_post_users_post(self):\n pass", "def test_peers_get(self):\n pass", "def test_meme_post(self):\n pass", "def test_post(self):\n pass", "def test_post_user_post(self):\n pass", "def test_wallets_post(self):\n pass", "def test_peers_peerid_delete(self):\n pass", "def test_peers_peerid_get(self):\n pass", "def test_post_query_reply_offers(self):\n pass", "def test_smoker_post(self):\n pass", "def test_users_getting_add_peer_event(self) -> None:\n streams_to_sub = [\"multi_user_stream\"]\n othello = self.example_user(\"othello\")\n cordelia = self.example_user(\"cordelia\")\n iago = self.example_user(\"iago\")\n orig_user_ids_to_subscribe = [self.test_user.id, othello.id]\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),\n )\n\n new_user_ids_to_subscribe = [iago.id, cordelia.id]\n with self.capture_send_event_calls(expected_num_events=5) as events:\n self.common_subscribe_to_streams(\n self.test_user,\n streams_to_sub,\n dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),\n )\n\n add_peer_events = [event for event in events if event[\"event\"].get(\"op\") == \"peer_add\"]\n (add_peer_event,) = add_peer_events\n\n self.assertEqual(add_peer_event[\"event\"][\"type\"], \"subscription\")\n self.assertEqual(add_peer_event[\"event\"][\"op\"], \"peer_add\")\n event_sent_to_ids = add_peer_event[\"users\"]\n for user_id in new_user_ids_to_subscribe:\n # Make sure new users subscribed to stream is not in\n # peer_add event recipient list\n self.assertNotIn(user_id, event_sent_to_ids)\n for old_user in orig_user_ids_to_subscribe:\n # Check non-new users are in peer_add event recipient list.\n self.assertIn(old_user, event_sent_to_ids)", "def test_message_post(self):\r\n\r\n submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)\r\n\r\n feedback_post = {\r\n 'feedback': 'feedback text',\r\n 'submission_id': '1',\r\n 'grader_id': '1',\r\n 'score': 3\r\n }\r\n result = self.openendedmodule.message_post(feedback_post, self.test_system)\r\n self.assertTrue(result['success'])\r\n\r\n # make sure it's actually sending something we want to the queue\r\n mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])\r\n self.assertEqual(mock_send_to_queue_body_arg['feedback'], feedback_post['feedback'])\r\n self.assertEqual(mock_send_to_queue_body_arg['submission_id'], int(feedback_post['submission_id']))\r\n self.assertEqual(mock_send_to_queue_body_arg['grader_id'], int(feedback_post['grader_id']))\r\n self.assertEqual(mock_send_to_queue_body_arg['score'], feedback_post['score'])\r\n body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])\r\n self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)\r\n self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)\r\n\r\n state = json.loads(self.openendedmodule.get_instance_state())\r\n self.assertEqual(state['child_state'], OpenEndedModule.DONE)", "def test_users_post(self):\n pass", "def test_users_post(self):\n pass", "def test_one_peer(self):\n\n\t\tself.n = tracker.make_peer_list \\\n\t\t\t([(\"test1\", \"100.100.100.100\", \"1000\")])\n\t\tself.assertEqual(self.n, [{'ip': '100.100.100.100', \\\n\t\t\t'peer id': 'test1', 'port': 1000}])", "def test_post_nveto_pmts(self):\n pass", "def test_posthardwares(self):\n 
pass", "def test_post(self):\n return self.doRequest(self.url, method=\"POST\", body=self.input)", "def test_message_post_fail(self):\r\n\r\n self.mock_xqueue.send_to_queue.return_value = (1, \"Not Queued\")\r\n\r\n feedback_post = {\r\n 'feedback': 'feedback text',\r\n 'submission_id': '1',\r\n 'grader_id': '1',\r\n 'score': 3\r\n }\r\n result = self.openendedmodule.message_post(feedback_post, self.test_system)\r\n self.assertFalse(result['success'])\r\n\r\n state = json.loads(self.openendedmodule.get_instance_state())\r\n self.assertNotEqual(state['child_state'], OpenEndedModule.DONE)", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass", "def post(self):\n pass" ]
[ "0.8632148", "0.6686478", "0.65633565", "0.6543095", "0.65333927", "0.64554286", "0.6429344", "0.6422975", "0.6381392", "0.63392735", "0.6260694", "0.61105293", "0.6106663", "0.60841507", "0.60485256", "0.60485256", "0.6039322", "0.5976668", "0.59709466", "0.5966034", "0.59545416", "0.5944224", "0.5944224", "0.5944224", "0.5944224", "0.5944224", "0.5944224", "0.5944224", "0.5944224", "0.5944224" ]
0.8919058
0
Test case for volumes_get
def test_volumes_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volume_get(self):\n pass", "def test_aws_service_api_volumes_get(self):\n pass", "def test_aws_service_api_volume_types_get(self):\n pass", "def volumes(self):", "def test_get_volume(self):\n self.assertEqual(self.cat_a.volume(), 6000)", "def get_volumes_detail(self, **kw):\n return (200, {}, {\"volumes\": [\n {'id': 1234,\n 'name': 'sample-volume for cinder',\n 'attachments': [{'server_id': 12234}]},\n {'id': 'pvcvolume',\n 'name': 'pvc sample-volume for cinder',\n 'attachments': [{'server_id': 54321}]}\n ]})", "def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)", "def test_aws_service_api_volumes_post(self):\n pass", "def test_aws_service_api_volume_patch(self):\n pass", "def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)", "def volume_get(context, volume_id):\n return _volume_get(context, volume_id)", "def get_volumes(self):\n res = self.get('%s/volumes' % self.catalog['volume'])\n if res['status'] == 200:\n return json.loads(res['body'])['volumes']\n else:\n LOG.error('Get volumes failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def test_aws_service_api_volume_delete(self):\n pass", "def test_volumes_post(self):\n pass", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def test_vault_get_vault_item(self):\n pass", "def get_basic_volume_info_all():\n vl = None\n try:\n d, err = xml_parse.run_gluster_command(\n '/usr/sbin/gluster volume info all --xml')\n if err:\n raise Exception(err)\n\n root = d[\"root\"]\n\n # Get the admin vol name so it can be excluded from the list\n admin_vol_name, err = config.get_admin_vol_name()\n if err:\n raise Exception(err)\n\n # Now get the all the volume info for user created volumes\n vl, err = xml_parse.get_volume_info(root, admin_vol_name)\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error getting basic volume information for all volumes : %s' % str(e)\n else:\n return vl, None", "def _get_volumes(list_of_volume_ids):\n\n ec2_client = connection.EC2ConnectionClient().client()\n\n try:\n volumes = ec2_client.get_all_volumes(\n volume_ids=list_of_volume_ids)\n except boto.exception.EC2ResponseError as e:\n if 'InvalidVolume.NotFound' in e:\n all_volumes = ec2_client.get_all_volumes()\n utils.log_available_resources(all_volumes)\n return None\n except boto.exception.BotoServerError as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n return volumes", "def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret", "def 
get_volumes():\n vols = []\n try:\n result = run_diskpart(['list volume'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append volume numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Volume (\\d+)\\s+([A-Za-z]?)\\s+', output):\n vols.append({'Number': tmp[0], 'Letter': tmp[1]})\n\n return vols", "def test_calculate_volume(self, mock_send_cli_cmd):\n self.log.display_title(title=self.tool.get_current_function_name())\n self.log.step_num = 0\n msg = \"calculate volume with number\"\n response = [\"2000\", \"400\", \"-\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n )\n self.assertTrue(result)\n\n msg = \"calculate volume with number with wing1_volume\"\n response = [\"2000\", \"400\"]\n\n result = self.ins.calculate_volume(\n device=None,\n count=response,\n wing1_volume=\"1000\"\n )\n self.assertTrue(result)", "def get_volumes(self):\n url = self._get_url() + 'volumes'\n volumes = self._request(url)\n return volumes.json()", "def test_update_volume_stats(self):\n actual = self.driver.get_volume_stats(True)\n self.assertEqual('HGST', actual['vendor_name'])\n self.assertEqual('hgst', actual['storage_protocol'])\n self.assertEqual(90, actual['total_capacity_gb'])\n self.assertEqual(87, actual['free_capacity_gb'])\n self.assertEqual(0, actual['reserved_percentage'])", "def test_least_busy_host_gets_volume(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)\n volume1.kill()\n volume2.kill()", "def get_volume_info(volumes):\n if type(volumes) is not list:\n volumes = [volumes]\n volume_info_list = []\n for volume in volumes:\n command = 'cinder show %s' % volume['id']\n volume_info = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n att = volume_info['attachments'].replace(\"'\", \"\\\"\").replace(\n \"u\\\"\", \"\\\"\").replace(\" None,\", \" \\\"None\\\",\")\n volume_info['device'] = json.loads(att)[0]['device']\n volume_info_list.append(volume_info)\n return volume_info_list", "def describe_volumes(InstanceId=None, StackId=None, RaidArrayId=None, VolumeIds=None):\n pass", "def ft_volumeslice( slice_name ):\n print \"slice: %s\" % slice_name\n \n volumes = get_volumeslice_volume_names( slice_name )\n \n print \"volumes mounted in slice %s:\" % slice_name\n for v in volumes:\n print \" %s:\" % v\n \n vs = get_volumeslice( v, slice_name )\n \n print \" %s\" % dir(vs)", "def test_volumes_simple_volume(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /cpath: /hpath\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert len(config.volumes) == 1\n\n v = config.volumes[\"/cpath\"]\n assert v.container_path == \"/cpath\"\n assert v.host_path == \"/hpath\"", "def test_aws_service_api_vm_get(self):\n pass", "def test_volumes_complex(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /foo: /host/foo\n /bar:\n hostpath: /host/bar\n /snap:\n hostpath: /host/snap\n options: z,ro\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n vols = config.volumes\n assert len(vols) == 3\n\n v = 
vols[\"/foo\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/foo\"\n assert v.host_path == \"/host/foo\"\n assert v.options == []\n\n v = vols[\"/bar\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/bar\"\n assert v.host_path == \"/host/bar\"\n assert v.options == []\n\n v = vols[\"/snap\"]\n assert isinstance(v, scuba.config.ScubaVolume)\n assert v.container_path == \"/snap\"\n assert v.host_path == \"/host/snap\"\n assert v.options == [\"z\", \"ro\"]" ]
[ "0.8623218", "0.86202246", "0.76679945", "0.75886875", "0.7179493", "0.7160929", "0.71130466", "0.70252365", "0.69838876", "0.69788086", "0.6976262", "0.6910008", "0.68536305", "0.6829676", "0.6827403", "0.6480373", "0.64743745", "0.6473025", "0.645679", "0.6447579", "0.6440788", "0.639202", "0.63649726", "0.63192916", "0.6273308", "0.62627965", "0.62573415", "0.6250424", "0.62470907", "0.62314" ]
0.89812547
0
Test case for volumes_post
def test_volumes_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_aws_service_api_volumes_post(self):\n pass", "def test_volumes_get(self):\n pass", "def test_volumes_volname_start_post(self):\n pass", "def test_volumes_volname_stop_post(self):\n pass", "def volumes(self):", "def test_aws_service_api_volume_delete(self):\n pass", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def test_aws_service_api_volumes_get(self):\n pass", "def test_aws_service_api_volume_attachment_put(self):\n pass", "def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')", "def test_aws_service_api_volume_patch(self):\n pass", "def test_aws_service_api_volume_get(self):\n pass", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def test_finish_resize_with_volumes(self):\n\n # create instance\n instance = self._create_fake_instance_obj()\n request_spec = objects.RequestSpec()\n\n # create volume\n volume = {'instance_uuid': None,\n 'device_name': None,\n 'id': uuids.volume,\n 'size': 200,\n 'attach_status': 'detached'}\n bdm = objects.BlockDeviceMapping(\n **{'context': self.context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': uuids.volume,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n\n # stub out volume attach\n def fake_volume_get(self, context, volume_id, microversion=None):\n return volume\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n\n def fake_volume_check_availability_zone(self, context,\n volume_id, instance):\n pass\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_volume_check_availability_zone)\n\n def fake_get_volume_encryption_metadata(self, context, volume_id):\n return {}\n self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',\n fake_get_volume_encryption_metadata)\n\n orig_connection_data = {\n 'target_discovered': True,\n 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,\n 'target_portal': '127.0.0.0.1:3260',\n 'volume_id': uuids.volume_id,\n }\n connection_info = {\n 'driver_volume_type': 'iscsi',\n 'data': orig_connection_data,\n }\n\n def fake_init_conn(self, context, volume_id, session):\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn)\n\n def fake_attach(self, context, volume_id, instance_uuid, device_name,\n mode='rw'):\n volume['instance_uuid'] = instance_uuid\n volume['device_name'] = device_name\n self.stub_out('nova.volume.cinder.API.attach', fake_attach)\n\n # stub out virt driver attach\n def fake_get_volume_connector(*args, **kwargs):\n return {}\n self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',\n 
fake_get_volume_connector)\n\n def fake_attach_volume(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.attach_volume',\n fake_attach_volume)\n\n # attach volume to instance\n self.compute.attach_volume(self.context, instance, bdm)\n\n # assert volume attached correctly\n self.assertEqual(volume['device_name'], '/dev/vdc')\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # begin resize\n flavor = self.default_flavor\n instance.task_state = task_states.RESIZE_PREP\n instance.save()\n self.compute.prep_resize(self.context, instance=instance,\n flavor=flavor,\n image={}, request_spec=request_spec,\n filter_properties={}, node=None,\n clean_shutdown=True, migration=None,\n host_list=[])\n\n # fake out detach for prep_resize (and later terminate)\n def fake_terminate_connection(self, context, volume, connector):\n connection_info['data'] = None\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n\n migration = objects.Migration.get_by_instance_and_status(\n self.context.elevated(),\n instance.uuid, 'pre-migrating')\n self.compute.resize_instance(self.context, instance=instance,\n migration=migration, image={},\n # TODO(stephenfin): Why a JSON string?\n flavor=jsonutils.to_primitive(flavor),\n clean_shutdown=True, request_spec=request_spec)\n\n # assert bdm is unchanged\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n cached_connection_info = jsonutils.loads(bdm['connection_info'])\n self.assertEqual(cached_connection_info['data'],\n orig_connection_data)\n # but connection was terminated\n self.assertIsNone(connection_info['data'])\n\n # stub out virt driver finish_migration\n def fake(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)\n\n instance.task_state = task_states.RESIZE_MIGRATED\n instance.save()\n\n # new initialize connection\n new_connection_data = dict(orig_connection_data)\n new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id,\n new_connection_data['target_iqn'] = new_iqn\n\n def fake_init_conn_with_data(self, context, volume, session):\n connection_info['data'] = new_connection_data\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn_with_data)\n\n self.compute.finish_resize(self.context,\n migration=migration,\n disk_info={}, image={}, instance=instance,\n request_spec=request_spec)\n\n # assert volume attached correctly\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance['uuid'])\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # stub out detach\n def fake_detach(self, context, volume_uuid):\n volume['device_path'] = None\n volume['instance_uuid'] = None\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n\n # clean up\n self.compute.terminate_instance(self.context, instance, [])", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def test_manage_volume_attachments(self, volume, instance, volumes_steps):\n volumes_steps.attach_instance(volume.name, 
instance.name)\n volumes_steps.detach_instance(volume.name, instance.name)", "def test_upload_volume_to_image(self, volume, images_steps, volumes_steps):\n image_name = next(generate_ids('image', length=20))\n volumes_steps.upload_volume_to_image(volume.name, image_name)\n\n images_steps.page_images().table_images.row(\n name=image_name).wait_for_presence(30)\n images_steps.delete_image(image_name)", "def test_upload_new_vdisk(self, mock_create_file):\n\n # traits are already set to use the REST API upload\n\n # First need to load in the various test responses.\n vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)\n vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)\n\n self.adpt.read.return_value = vg_orig\n self.adpt.update_by_path.return_value = vg_post_crt\n mock_create_file.return_value = self._fake_meta()\n\n n_vdisk, f_wrap = ts.upload_new_vdisk(\n self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50,\n d_size=25, sha_chksum='abc123')\n\n # Ensure the create file was called\n mock_create_file.assert_called_once_with(\n self.adpt, 'test2', vf.FileType.DISK_IMAGE, self.v_uuid,\n f_size=50, tdev_udid='0300f8d6de00004b000000014a54555cd9.3',\n sha_chksum='abc123')\n\n # Ensure cleanup was called after the upload\n self.adpt.delete.assert_called_once_with(\n 'File', service='web',\n root_id='6233b070-31cc-4b57-99bd-37f80e845de9')\n self.assertIsNone(f_wrap)\n self.assertIsNotNone(n_vdisk)\n self.assertIsInstance(n_vdisk, stor.VDisk)", "def test_create_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10}\n ret = self.driver.create_volume(volume)\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume10', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider, note that provider_id is hashed\n expected_pid = {'provider_id': 'volume10'}\n self.assertDictMatch(expected_pid, ret)", "def post_volumes_pvcvolume_action(self, body, **kw):\n _body = None\n resp = 202\n assert len(list(body.keys())) == 1\n action = list(body.keys())[0]\n if action == 'os-attach':\n assert sorted(list(body[action])) == ['instance_uuid',\n 'mode',\n 'mountpoint']\n elif action == 'os-detach':\n assert body[action] is None\n elif action == 'os-reserve':\n assert body[action] is None\n elif action == 'os-unreserve':\n assert body[action] is None\n elif action == 'os-initialize_connection':\n assert list(body[action].keys()) == ['connector']\n return (202, {}, {'connection_info': 'foos'})\n elif action == 'os-terminate_connection':\n assert list(body[action].keys()) == ['connector']\n elif action == 'os-begin_detaching':\n assert body[action] is None\n elif action == 'os-roll_detaching':\n assert body[action] is None\n elif action == 'os-reset_status':\n assert 'status' in body[action]\n else:\n raise AssertionError(\"Unexpected action: %s\" % action)\n return (resp, {}, _body)", "def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)", "def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 
'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)", "def test_aws_service_api_snapshots_post(self):\n pass", "def update_volumes():\n print 'do something useful here'", "def _test_pc_mm_document_with_volume(client, json_headers):\n\n parent_pid = \"serid-1\"\n parent_pid_type = \"serid\"\n child_pid = \"docid-2\"\n child_pid_type = \"docid\"\n relation_type = \"multipart_monograph\"\n\n payload = {\n \"parent_pid\": parent_pid,\n \"parent_pid_type\": parent_pid_type,\n \"child_pid\": child_pid,\n \"child_pid_type\": child_pid_type,\n \"relation_type\": relation_type,\n \"volume\": \"v.3\",\n }\n\n def _test_create_mm_document_with_volume(create_using_pid1=True):\n \"\"\"Test relation creation with volume of MM and Document.\"\"\"\n\n parent, child = _choose_endpoints_and_do_request(\n (client, json_headers, \"POST\"),\n (parent_pid, parent_pid_type, child_pid, child_pid_type),\n payload,\n create_using_pid1=create_using_pid1,\n )\n\n _assert_record_relations(\n parent,\n expected={\n \"relations_metadata\": {\n \"multipart_monograph\": [\n {\n \"pid\": child_pid,\n \"pid_type\": child_pid_type,\n \"volume\": \"v.3\",\n\n }\n ]\n },\n \"relations\": {},\n },\n )\n\n _assert_record_relations(\n child,\n expected={\n \"relations\": {\n \"multipart_monograph\": [\n {\n \"pid\": parent_pid,\n \"pid_type\": parent_pid_type,\n \"title\": parent[\"title\"],\n \"volume\": \"v.3\",\n \"relation_type\": \"multipart_monograph\",\n }\n ]\n }\n },\n )\n\n def _test_delete_mm_document_with_volume(create_using_pid1=True):\n \"\"\"Test relation deletion with volume of MM and Document.\"\"\"\n\n parent, child = _choose_endpoints_and_do_request(\n (client, json_headers, \"DELETE\"),\n (parent_pid, parent_pid_type, child_pid, child_pid_type),\n payload,\n create_using_pid1=create_using_pid1,\n )\n\n _assert_record_relations(\n parent,\n expected={\n \"relations\": {}\n },\n )\n _assert_record_relations(child, expected={\"relations\": {}})\n\n _test_create_mm_document_with_volume()\n _test_delete_mm_document_with_volume()\n _test_create_mm_document_with_volume(create_using_pid1=False)\n _test_delete_mm_document_with_volume(create_using_pid1=False)\n # recreate for the next one, to have some more valuable test data\n _test_create_mm_document_with_volume()", "def test_delete_volumes(self, volumes_count, volumes_steps,\n create_volumes):\n volume_names = list(generate_ids('volume', count=volumes_count))\n create_volumes(volume_names)", "def test_delete_volume(self):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n volume = {'id': '1', 'name': 'volume1',\n 'display_name': '',\n 'volume_type_id': type_ref['id'],\n 'size': 10,\n 'provider_id': 'volume10'}\n self.driver.delete_volume(volume)\n expected = 
{'name': 'volume10'}\n self.assertDictMatch(expected, self.deleted)", "def test_aws_service_api_vms_post(self):\n pass", "def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)", "def test_aws_service_api_volume_types_get(self):\n pass" ]
[ "0.7987023", "0.7041567", "0.7020928", "0.68210614", "0.6572613", "0.649182", "0.64764535", "0.6456867", "0.64517254", "0.64322543", "0.63329756", "0.6299428", "0.6261842", "0.6189976", "0.61466736", "0.6118623", "0.6074925", "0.6065953", "0.6029333", "0.5986011", "0.59761596", "0.59562373", "0.58966994", "0.5888879", "0.58356166", "0.5822404", "0.5819845", "0.5796022", "0.57831466", "0.5781541" ]
0.8835188
0
Test case for volumes_volname_start_post
def test_volumes_volname_start_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_volumes_volname_stop_post(self):\n pass", "def test_volumes_post(self):\n pass", "def test_aws_service_api_volumes_post(self):\n pass", "def test_pvcvolume_attach(self):\n v = self.cs.volumes.get('pvcvolume')\n self.cs.volumes.attach(v, 1, '/dev/vdc')\n self.cs.assert_called('POST',\n '/volumes/pvcvolume/action')", "def test_backend_name_distinction(self):\n self._test_backend_name_distinction(self.volume_id_list_without_prefix)", "def volume_stop_or_start(vol_name, op):\n\n return_dict = None\n try:\n cmd = 'gluster --mode=script volume %s %s --xml' % (op, vol_name)\n return_dict, err = xml_parse.run_gluster_command(cmd)\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error stopping/starting volume : %s' % str(e)\n else:\n return return_dict, None", "def test_volumes_get(self):\n pass", "def test_backend_name_distinction_with_prefix(self):\n self._test_backend_name_distinction(self.volume_id_list_with_prefix)", "def test_migrate_volume(self, volume, volumes_steps):\n old_host, _ = volumes_steps.migrate_volume(volume.name)\n volumes_steps.migrate_volume(volume.name, old_host)", "def started(name):\n ret = {\"name\": name, \"changes\": {}, \"comment\": \"\", \"result\": False}\n\n volinfo = __salt__[\"glusterfs.info\"]()\n if name not in volinfo:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Volume {} does not exist\".format(name)\n return ret\n\n if int(volinfo[name][\"status\"]) == 1:\n ret[\"comment\"] = \"Volume {} is already started\".format(name)\n ret[\"result\"] = True\n return ret\n elif __opts__[\"test\"]:\n ret[\"comment\"] = \"Volume {} will be started\".format(name)\n ret[\"result\"] = None\n return ret\n\n vol_started = __salt__[\"glusterfs.start_volume\"](name)\n if vol_started:\n ret[\"result\"] = True\n ret[\"comment\"] = \"Volume {} is started\".format(name)\n ret[\"change\"] = {\"new\": \"started\", \"old\": \"stopped\"}\n else:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Failed to start volume {}\".format(name)\n\n return ret", "def test_least_busy_host_gets_volume(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)\n volume1.kill()\n volume2.kill()", "def volume_present(\n name,\n bricks,\n stripe=False,\n replica=False,\n device_vg=False,\n transport=\"tcp\",\n start=False,\n force=False,\n arbiter=False,\n):\n ret = {\"name\": name, \"changes\": {}, \"comment\": \"\", \"result\": False}\n\n if suc.check_name(name, \"a-zA-Z0-9._-\"):\n ret[\"comment\"] = \"Invalid characters in volume name.\"\n return ret\n\n volumes = __salt__[\"glusterfs.list_volumes\"]()\n if name not in volumes:\n if __opts__[\"test\"]:\n comment = \"Volume {} will be created\".format(name)\n if start:\n comment += \" and started\"\n ret[\"comment\"] = comment\n ret[\"result\"] = None\n return ret\n\n vol_created = __salt__[\"glusterfs.create_volume\"](\n name, bricks, stripe, replica, device_vg, transport, start, force, arbiter\n )\n\n if not vol_created:\n ret[\"comment\"] = \"Creation of volume {} failed\".format(name)\n return ret\n old_volumes = volumes\n volumes = __salt__[\"glusterfs.list_volumes\"]()\n if name in volumes:\n ret[\"changes\"] = {\"new\": 
volumes, \"old\": old_volumes}\n ret[\"comment\"] = \"Volume {} is created\".format(name)\n\n else:\n ret[\"comment\"] = \"Volume {} already exists\".format(name)\n\n if start:\n if __opts__[\"test\"]:\n # volume already exists\n ret[\"comment\"] = ret[\"comment\"] + \" and will be started\"\n ret[\"result\"] = None\n return ret\n if int(__salt__[\"glusterfs.info\"]()[name][\"status\"]) == 1:\n ret[\"result\"] = True\n ret[\"comment\"] = ret[\"comment\"] + \" and is started\"\n else:\n vol_started = __salt__[\"glusterfs.start_volume\"](name)\n if vol_started:\n ret[\"result\"] = True\n ret[\"comment\"] = ret[\"comment\"] + \" and is now started\"\n if not ret[\"changes\"]:\n ret[\"changes\"] = {\"new\": \"started\", \"old\": \"stopped\"}\n else:\n ret[\"comment\"] = (\n ret[\"comment\"]\n + \" but failed to start. Check logs for further information\"\n )\n return ret\n\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n else:\n ret[\"result\"] = True\n return ret", "def test_post_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)", "def test_int_smoke_virtualbox():\n\n test_dir = \"tests/int/simple_virtualbox\"\n utils.cleanup_dir_and_vms_from_dir(test_dir, names=['firstvbsmoke'])\n\n # should init\n command = \"mech init --provider virtualbox --name firstvbsmoke bento/ubuntu-18.04\"\n expected_lines = [\"Initializing\", \"Loading metadata\", \"has been initialized\", \"mech up\"]\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # should start\n command = \"mech up\"\n expected_lines = [\"virtualbox\", \"Extracting\", \"Sharing folders\",\n \"Getting IP\", \"started\", \"Provisioning\"]\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # should be able to re-up, verify 'start' alias works, too\n commands = [\"mech up\", \"mech start\"]\n expected_lines = [\"Getting IP\", \"started\"]\n for command in commands:\n results = subprocess.run(commands, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech ps'\n command = \"mech ps firstvbsmoke\"\n expected_lines = [\"/sbin/init\"]\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech global-status'\n command = \"mech global-status\"\n expected_lines = ['firstvbsmoke']\n results = subprocess.run(command, cwd=test_dir, shell=True, 
capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech list'\n commands = [\"mech ls\", \"mech list\"]\n expected_lines = ['firstvbsmoke', 'ubuntu', 'virtualbox']\n for command in commands:\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech stop'\n command = \"mech stop\"\n expected_lines = ['Stopped']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech stop' again\n command = \"mech stop\"\n expected_lines = ['Not stopped']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech start'\n command = \"mech start\"\n expected_lines = ['started', 'Nothing to provision']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech pause'\n command = \"mech pause\"\n expected_lines = ['Paused']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech resume'\n command = \"mech resume\"\n expected_lines = ['resumed']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech suspend'\n command = \"mech suspend\"\n expected_lines = ['Not sure equivalent command on this platform']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech resume' after suspend\n command = \"mech resume\"\n expected_lines = ['started', 'Nothing to provision']\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert 
re.search(line, stdout, re.MULTILINE)\n\n # test 'mech ssh' (different forms)\n commands = [\"mech ssh -c 'uptime' firstvbsmoke\", \"mech ssh --command 'uptime' firstvbsmoke\"]\n expected_lines = ['load average']\n for command in commands:\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n for line in expected_lines:\n print(line)\n assert re.search(line, stdout, re.MULTILINE)\n\n # test 'mech scp' to guest\n command = \"date > now; mech scp now firstvbsmoke:/tmp\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stdout == ''\n assert stderr == ''\n assert results.returncode == 0\n\n # test 'mech scp' from guest\n command = \"mech scp firstvbsmoke:/tmp/now .\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stdout == ''\n assert stderr == ''\n assert results.returncode == 0\n\n # test 'mech ip firstvbsmoke'\n command = \"mech ip firstvbsmoke\"\n expected = r\"[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n assert re.search(expected, stdout)\n\n if platform.system() == \"Linux\":\n # test \"mech port\"\n command = \"mech port\"\n expected = r\"This command is not supported on this OS\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stdout == ''\n assert re.search(expected, stderr, re.MULTILINE)\n assert results.returncode == 1\n else:\n # test \"mech port\"\n command = \"mech port\"\n expected = r\"Not yet implemented on this platform\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert re.search(expected, stdout, re.MULTILINE)\n assert results.returncode == 0\n\n # test \"mech box list\" (and alias)\n commands = [\"mech box list\", \"mech box ls\"]\n expected = r\"ubuntu\"\n for command in commands:\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n assert re.search(expected, stdout, re.MULTILINE)\n\n # test \"mech snapshot list\" (and alias)\n commands = [\"mech snapshot list\", \"mech snapshot ls\"]\n expected = \"Not yet implemented\"\n for command in commands:\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n assert re.search(expected, stdout, re.MULTILINE)\n\n # test \"mech snapshot save\"\n command = \"mech snapshot save snap1 firstvbsmoke\"\n expected = \"Not yet implemented\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = 
results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n assert re.search(expected, stdout)\n\n # test \"mech snapshot save\" with same args again\n # command = \"mech snapshot save snap1 firstvbsmoke\"\n # results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n # stdout = results.stdout.decode('utf-8')\n # stderr = results.stderr.decode('utf-8')\n # assert stdout == ''\n # assert re.search('A snapshot with the name already exists', stderr)\n # assert results.returncode == 1\n\n # test \"mech snapshot list\" (and alias) again (now that we have one)\n # commands = [\"mech snapshot list\", \"mech snapshot ls\"]\n # expected = \"Total snapshots: 1\"\n # for command in commands:\n # results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n # stdout = results.stdout.decode('utf-8')\n # stderr = results.stderr.decode('utf-8')\n # assert stderr == ''\n # assert results.returncode == 0\n # assert re.search(expected, stdout, re.MULTILINE)\n\n # test \"mech snapshot delete\"\n command = \"mech snapshot delete snap1 firstvbsmoke\"\n expected = \"Not yet implemented\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n assert re.search(expected, stdout)\n\n # should be able to destroy\n command = \"mech destroy -f\"\n expected = \"Deleting\"\n results = subprocess.run(command, cwd=test_dir, shell=True, capture_output=True)\n stdout = results.stdout.decode('utf-8')\n stderr = results.stderr.decode('utf-8')\n assert stderr == ''\n assert results.returncode == 0\n assert re.search(expected, stdout)\n\n # clean up at the end\n utils.cleanup_dir_and_vms_from_dir(test_dir)", "def update_volumes():\n print 'do something useful here'", "def testStageNamePrefixSmoke(self):\n stage = self.ConstructStage()\n self.assertEqual(stage.StageNamePrefix(), 'Builder')", "def volumes(self):", "def _tag_volume():\n if dry:\n print('Would tag the new volume.')\n return True\n\n while True:\n # waiting for the volume to be up to tag it\n i = _fetch('vm')\n v = [x for x in i.volumes.all()]\n if len(v) == 0:\n # volumes should actually be already there once the IP is up\n time.sleep(1)\n else:\n for x in v:\n print('Tagging volume ' + x.id + '.')\n _tag_resource(x)\n break", "def test_volume_extend(self, volume, volumes_steps):\n volumes_steps.extend_volume(volume.name)", "def test_finish_resize_with_volumes(self):\n\n # create instance\n instance = self._create_fake_instance_obj()\n request_spec = objects.RequestSpec()\n\n # create volume\n volume = {'instance_uuid': None,\n 'device_name': None,\n 'id': uuids.volume,\n 'size': 200,\n 'attach_status': 'detached'}\n bdm = objects.BlockDeviceMapping(\n **{'context': self.context,\n 'source_type': 'volume',\n 'destination_type': 'volume',\n 'volume_id': uuids.volume,\n 'instance_uuid': instance['uuid'],\n 'device_name': '/dev/vdc'})\n bdm.create()\n\n # stub out volume attach\n def fake_volume_get(self, context, volume_id, microversion=None):\n return volume\n self.stub_out('nova.volume.cinder.API.get', fake_volume_get)\n\n def fake_volume_check_availability_zone(self, context,\n volume_id, instance):\n pass\n self.stub_out('nova.volume.cinder.API.check_availability_zone',\n fake_volume_check_availability_zone)\n\n def fake_get_volume_encryption_metadata(self, context, volume_id):\n return {}\n 
self.stub_out('nova.volume.cinder.API.get_volume_encryption_metadata',\n fake_get_volume_encryption_metadata)\n\n orig_connection_data = {\n 'target_discovered': True,\n 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % uuids.volume_id,\n 'target_portal': '127.0.0.0.1:3260',\n 'volume_id': uuids.volume_id,\n }\n connection_info = {\n 'driver_volume_type': 'iscsi',\n 'data': orig_connection_data,\n }\n\n def fake_init_conn(self, context, volume_id, session):\n return connection_info\n self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn)\n\n def fake_attach(self, context, volume_id, instance_uuid, device_name,\n mode='rw'):\n volume['instance_uuid'] = instance_uuid\n volume['device_name'] = device_name\n self.stub_out('nova.volume.cinder.API.attach', fake_attach)\n\n # stub out virt driver attach\n def fake_get_volume_connector(*args, **kwargs):\n return {}\n self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',\n fake_get_volume_connector)\n\n def fake_attach_volume(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.attach_volume',\n fake_attach_volume)\n\n # attach volume to instance\n self.compute.attach_volume(self.context, instance, bdm)\n\n # assert volume attached correctly\n self.assertEqual(volume['device_name'], '/dev/vdc')\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # begin resize\n flavor = self.default_flavor\n instance.task_state = task_states.RESIZE_PREP\n instance.save()\n self.compute.prep_resize(self.context, instance=instance,\n flavor=flavor,\n image={}, request_spec=request_spec,\n filter_properties={}, node=None,\n clean_shutdown=True, migration=None,\n host_list=[])\n\n # fake out detach for prep_resize (and later terminate)\n def fake_terminate_connection(self, context, volume, connector):\n connection_info['data'] = None\n self.stub_out('nova.volume.cinder.API.terminate_connection',\n fake_terminate_connection)\n\n migration = objects.Migration.get_by_instance_and_status(\n self.context.elevated(),\n instance.uuid, 'pre-migrating')\n self.compute.resize_instance(self.context, instance=instance,\n migration=migration, image={},\n # TODO(stephenfin): Why a JSON string?\n flavor=jsonutils.to_primitive(flavor),\n clean_shutdown=True, request_spec=request_spec)\n\n # assert bdm is unchanged\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance.uuid)\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['device_name'], volume['device_name'])\n cached_connection_info = jsonutils.loads(bdm['connection_info'])\n self.assertEqual(cached_connection_info['data'],\n orig_connection_data)\n # but connection was terminated\n self.assertIsNone(connection_info['data'])\n\n # stub out virt driver finish_migration\n def fake(*args, **kwargs):\n pass\n self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)\n\n instance.task_state = task_states.RESIZE_MIGRATED\n instance.save()\n\n # new initialize connection\n new_connection_data = dict(orig_connection_data)\n new_iqn = 'iqn.2010-10.org.openstack:%s.2' % uuids.volume_id,\n new_connection_data['target_iqn'] = new_iqn\n\n def fake_init_conn_with_data(self, context, volume, session):\n connection_info['data'] = new_connection_data\n return connection_info\n 
self.stub_out('nova.volume.cinder.API.initialize_connection',\n fake_init_conn_with_data)\n\n self.compute.finish_resize(self.context,\n migration=migration,\n disk_info={}, image={}, instance=instance,\n request_spec=request_spec)\n\n # assert volume attached correctly\n disk_info = db.block_device_mapping_get_all_by_instance(\n self.context, instance['uuid'])\n self.assertEqual(len(disk_info), 1)\n for bdm in disk_info:\n self.assertEqual(bdm['connection_info'],\n jsonutils.dumps(connection_info))\n\n # stub out detach\n def fake_detach(self, context, volume_uuid):\n volume['device_path'] = None\n volume['instance_uuid'] = None\n self.stub_out('nova.volume.cinder.API.detach', fake_detach)\n\n # clean up\n self.compute.terminate_instance(self.context, instance, [])", "def test_create_volume_from_snapshot(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n snap = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n volume = {'id': '2', 'name': 'volume2', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_volume_from_snapshot(volume, snap)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'volume2', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'volume2'}\n self.assertDictMatch(expected_pid, pid)", "def vol_up_and_validate(self):\n self.logger.info('Increasing volume')\n before_vol = self.dut.volume('Up', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Up', 1)\n if not after_vol or not before_vol or after_vol <= before_vol:\n self.logger.error(\n 'Unable to increase the volume. Before: %s. 
After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error increasing volume')", "def volup(self, raiseby=1):\n command + 'volup ' + str(raiseby)\n self.run_command(command)", "def ft_volumeslice( slice_name ):\n print \"slice: %s\" % slice_name\n \n volumes = get_volumeslice_volume_names( slice_name )\n \n print \"volumes mounted in slice %s:\" % slice_name\n for v in volumes:\n print \" %s:\" % v\n \n vs = get_volumeslice( v, slice_name )\n \n print \" %s\" % dir(vs)", "def test_edit_volume(self, volume, volumes_steps):\n new_name = volume.name + ' (updated)'\n with volume.put(name=new_name):\n volumes_steps.edit_volume(volume_name=volume.name,\n new_volume_name=new_name)", "def assign_volume_letters():\n remove_volume_letters()\n\n # Write script\n script = []\n for vol in get_volumes():\n script.append('select volume {}'.format(vol['Number']))\n script.append('assign')\n\n # Run\n run_diskpart(script)", "def test_backend_name_reporting(self):\n for volume_id in self.volume_id_list_without_prefix:\n self._test_backend_name_reporting_by_volume_id(volume_id)", "def test_change_volume_type(self, create_volume, volumes_steps):\n volume_name = generate_ids('volume').next()\n create_volume(volume_name, volume_type=None)\n volumes_steps.change_volume_type(volume_name)", "def testGenerateVolumesMountpoints(self):\n self.maxDiff = None\n de_object = de.DockerExplorerTool()\n de_object._explorer = self.explorer_object\n container_obj = de_object._explorer.GetContainer(\n '712909b5ab80d8785841f12e361c218a2faf5365f9ed525f2a0d6b6590ba89cb')\n\n commands = container_obj.storage_object._MakeVolumeMountCommands(\n container_obj, '/mnt')\n commands = [' '.join(x) for x in commands]\n expected_commands = [(\n '/bin/mount --bind -o ro '\n 'test_data/docker/volumes/f5479c534bbc6e2b9861973c2fbb4863ff5b7b5843c098d7fb1a027fe730a4dc/_data '\n '/mnt/opt/vols/volume')]\n self.assertEqual(expected_commands, commands)", "def test_backup_info_with_start_end_flag(self):\n if self.bkinfo_date_start_ago:\n conn = RemoteMachineShellConnection(self.backupset.backup_host)\n start_date_cmd = \"date --date=\\\"{} days ago\\\" '+%d-%m-%Y' \"\\\n .format(self.bkinfo_date_start_ago)\n output, error = conn.execute_command(start_date_cmd)\n start_date = output[0]\n end_date_cmd = \"date '+%d-%m-%Y' \"\n output, error = conn.execute_command(end_date_cmd)\n end_date = output[0]\n conn.disconnect()\n\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size,\n end=self.num_items)\n initial_gen = copy.deepcopy(gen)\n self.log.info(\"Start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.log.info(\"Create backup repo \")\n self.backup_create()\n for i in range(1, self.backupset.number_of_backups + 1):\n self.backup_cluster()\n self.log.info(\"done running backup\")\n\n if self.bkinfo_start_end_with_bkname:\n bkname_start_index = int(self.bkinfo_start_end_with_bkname.split(\":\")[0])\n bkname_start = self.backups[bkname_start_index]\n bkname_end_index = int(self.bkinfo_start_end_with_bkname.split(\":\")[1])\n bkname_end = self.backups[bkname_end_index]\n\n if self.bkinfo_date_start_ago:\n o, e = self.backup_info(start=start_date,end=end_date)\n elif self.bkinfo_start_end_with_bkname:\n o, e = self.backup_info(start=bkname_start,end=bkname_end)\n else:\n o, e = self.backup_info(start=self.bkinfo_start,end=self.bkinfo_end)\n if o and o[0]:\n bk_info = json.loads(o[0])\n bk_info = bk_info[\"backups\"]\n if self.debug_logs:\n print(\"\\nbk info : \", bk_info)\n 
print(\"\\n bkinfo len: \", len(bk_info))\n print(\"\\nbk info date : \", bk_info[0][\"date\"])\n print(\"\\nbk info type : \", bk_info[0][\"type\"])\n print(\"\\nnubmer backup : \", self.backups)\n if self.bkinfo_start == 1 and self.bkinfo_end == 1:\n if \"FULL\" not in bk_info[0][\"type\"]:\n self.fail(\"First backup is not full backup\")\n elif self.bkinfo_start > 1 and self.bkinfo_end > 1:\n if \"INCR\" not in bk_info[0][\"type\"]:\n self.fail(\"> 0th backup is not incr backup\")\n if self.bkinfo_date_start_ago:\n if len(bk_info) != len(self.backups):\n self.fail(\"bkrs info failed to show all backups today\")\n elif self.bkinfo_start_end_with_bkname:\n if len(bk_info) != (bkname_end_index - bkname_start_index + 1):\n self.fail(\"bkrs info does not show correct nubmer of backups with backup name\")\n elif len(bk_info) != (self.bkinfo_end - self.bkinfo_start + 1):\n self.fail(\"bkrs info does not show correct nubmer of backups\")" ]
[ "0.79183763", "0.6501542", "0.58291066", "0.54706204", "0.54484326", "0.54408056", "0.543602", "0.53255093", "0.5283495", "0.5239277", "0.5198263", "0.5153576", "0.5123313", "0.51021636", "0.5099367", "0.5072275", "0.5065416", "0.5032697", "0.5027716", "0.5013804", "0.49972054", "0.4988625", "0.49863377", "0.4971087", "0.49622416", "0.4958046", "0.49554622", "0.4943241", "0.4903162", "0.48971206" ]
0.9011076
0
Test case for volumes_volname_stop_post
def test_volumes_volname_stop_post(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_volumes_volname_start_post(self):\n pass", "def test_volumes_post(self):\n pass", "def test_stop(self):\n\n message = {\"method\": \"stop\",\n \"params\": {\"elem\": self.container_running}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"stop\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_running\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_stop_status = containers[container_name].lower().find(\"exited\")\n\n self.assertEqual(find_stop_status, 0, \"Container has not stopped\")", "def test_aws_service_api_volumes_post(self):\n pass", "def test_aws_service_api_volume_delete(self):\n pass", "def volume_stop_or_start(vol_name, op):\n\n return_dict = None\n try:\n cmd = 'gluster --mode=script volume %s %s --xml' % (op, vol_name)\n return_dict, err = xml_parse.run_gluster_command(cmd)\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error stopping/starting volume : %s' % str(e)\n else:\n return return_dict, None", "def stopTest(self, test):", "def test_delete__volume(self):\n arglist = [\n '--volume',\n self.projects[0].id,\n ]\n verifylist = [\n ('service', 'volume'),\n ('project', self.projects[0].id),\n ]\n\n parsed_args = self.check_parser(self.cmd, arglist, verifylist)\n\n result = self.cmd.take_action(parsed_args)\n\n self.assertIsNone(result)\n self.projects_mock.get.assert_called_once_with(self.projects[0].id)\n self.compute_quotas_mock.delete.assert_not_called()\n self.volume_quotas_mock.delete.assert_called_once_with(\n self.projects[0].id,\n )\n self.network_mock.delete_quota.assert_not_called()", "def test_volumes_get(self):\n pass", "def post_stop(self):", "def stopTestRun(self):", "def test_create_drives_drive_stopfail_item(self):\n pass", "def vol_down_and_validate(self):\n self.logger.info('Decreasing volume')\n before_vol = self.dut.volume('Down', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Down', 1)\n if not after_vol or not before_vol or after_vol >= before_vol:\n self.logger.error(\n 'Unable to decrease the volume. Before: %s. 
After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error decreasing volume')", "def vm_stop(self, params: dict) -> Tuple[\"Status\", dict]:", "def need_stop(self, path):", "def _stop(self):", "def stop():", "def stop():", "def stop():", "def stop():", "def _prepare_to_stop(self):\n pass", "def RunStop(self, zone=None):\n if zone is None:\n zone = self.zone\n try:\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n except Exception, e:\n self.RegisterImage()\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n if not self.volume_1:\n self.volume_1 = self.tester.ec2.create_volume(zone=self.zone, size=2)\n if not self.volume_2:\n self.volume_2 = self.tester.ec2.create_volume(zone=self.zone, size=1)\n\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.reservation = self.tester.ec2.run_image(**self.run_instance_params)\n ## Ensure that we can attach and use a volume\n for instance in self.reservation.instances:\n instance.attach_volume(self.volume_1, self.test_volume_1_path)\n instance.attach_volume(self.volume_2, self.test_volume_2_path)\n self.tester.ec2.stop_instances(self.reservation)\n for instance in self.reservation.instances:\n if instance.ip_address or instance.private_ip_address:\n raise Exception(\"Instance had a public \" + str(instance.ip_address) + \" private \" + str(instance.private_ip_address) )\n if instance.block_device_mapping[self.test_volume_1_path] is None:\n raise Exception(\"DBM path is invalid\")\n if self.volume_1.id != instance.block_device_mapping[self.test_volume_1_path].volume_id:\n raise Exception(\"Volume id does not match\")", "def test_stop_machine(self, pretty_print, owner_api_token):\n machine = setup_data.get('stop_machine', {}).get(\n 'machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + \\\n '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(\n api_token=owner_api_token,\n uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(\n api_token=owner_api_token,\n uri=setup_data['amazon_machine_uri'],\n data={'state': 'stopped', 'actions': {'resize': True}},\n timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')", "def _on_stop(self, _):\n self.unit.status = MaintenanceStatus(\"Pod is terminating.\")\n logger.info(\"Pod is terminating.\")", "def _check_stop(self, guest_obj, stop_cmd):\n # call method and verify if it executed cmd in ssh shell\n guest_obj.stop()\n self._mock_ssh_shell.run.assert_called_with(stop_cmd)", "def stop_procedure(self):\n pass", "def process_test_stop(self, config, results, result_id, db):\n pass", "def stopEngines():\n pass", "def test_disconnect_volume_no_op_other_state(self):\n inst = mock.MagicMock()\n inst.task_state = task_states.RESUMING\n self.vol_drv.disconnect_volume(self.adpt, 'host_uuid', 'vm_uuid',\n inst, mock.ANY)\n self.assertEqual(0, self.adpt.read.call_count)", "def Stop(self, *_):\n self.Log('Stopping...')\n self._stop = True" ]
[ "0.77427644", "0.64347535", "0.5892416", "0.58504677", "0.57865006", "0.5761441", "0.568179", "0.56483597", "0.55805796", "0.5577241", "0.55441153", "0.55238473", "0.5518342", "0.54867876", "0.54762626", "0.54633987", "0.5460693", "0.5460693", "0.5460693", "0.5460693", "0.54547787", "0.5431488", "0.54200995", "0.54130256", "0.5399663", "0.5379907", "0.5368426", "0.5366909", "0.53616875", "0.53466403" ]
0.92394954
0
Get all valid pairs of tape in rect form given a list of OpenCV contours. Using the angles and center coordinates associated with each of the contours, possible pairs of tape are identified. However, no verification is done to ensure that the contours are valid pieces of tape; the function assumes that they are. If no valid pairs are detected in the given list of contours, an empty list is returned instead of a list of the form [(rect1, rect2), (rect3, rect4), ... ]
def get_pair_rects(contours): rect_pairs = [] for index, cnt in enumerate(contours): # Rotated rect - ( center (x,y), (width, height), angle of rotation ) rect = cv2.minAreaRect(cnt) center_x, center_y = rect[0] rect_angle = -round(rect[2], 2) if rect_angle > 45.0: # Iterate through all of the potential matches min_x_dist = min_rect = min_index = None for pot_index, pot_match in enumerate(contours): if np.array_equal(pot_match, cnt): continue match_rect = cv2.minAreaRect(pot_match) # Check if match is to the right of the contour if match_rect[0][0] > rect[0][0] and abs( match_rect[2] - rect_angle) > ANGLE_TOLERANCE_DEG: x_distance = match_rect[0][0] - rect[0][0] if min_x_dist is None or x_distance < min_x_dist: min_x_dist = x_distance min_rect = match_rect min_index = pot_index if min_rect is not None: rect_pairs.append((rect, min_rect)) np.delete(contours, index) np.delete(contours, min_index) return rect_pairs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)", "def find_squares( contours, debug=False ):\r\n #=================================================================\r\n # The Minimum and Maximum rations for width vs height for the goal\r\n # based on experimental results goal is approx 1.5:1\r\n #=================================================================\r\n MIN_RATIO = 1.3\r\n MAX_RATIO = 1.8\r\n ret = []\r\n\r\n for shape in contours:\r\n x, y, w, h = cv2.boundingRect( shape )\r\n w_h_ratio = float( w ) / float( h )\r\n if debug:\r\n print \"Area\", (w * h)\r\n print \"Width \", w\r\n print \"Height\", h\r\n if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:\r\n ret.append( shape )\r\n\r\n return( ret )", "def find_targets(contours, frame):\n # If there aren't any contours present, return frame without drawing\n if len(contours) == 0:\n return frame\n # Copy frame, TODO why do we need to do this?\n image = frame.copy()\n screen_height, screen_width, _ = image.shape;\n # TODO: Why subtract?\n center_x = screen_width / 2 - .5\n center_y = screen_height / 2 - .5\n # List for storing found targets\n targets = []\n\n if len(contours) >= 2:\n # Sort contours in descending order by size\n contours.sort(key=lambda contour: cv2.contourArea(contour), reverse=True)\n\n valid_contours = []\n for contour in contours:\n # Calculate areas of contour\n contour_area = cv2.contourArea(contour)\n if contour_area >= MIN_CONTOUR_SIZE:\n # Get moments of contour for centroid calculations\n moments = cv2.moments(contour)\n # Find centroid of contour\n if moments[\"m00\"] != 0:\n cx = int(moments[\"m10\"] / moments[\"m00\"])\n cy = int(moments[\"m01\"] / moments[\"m00\"])\n else:\n cx, cy = 0, 0\n\n ### CALCULATE CONTOUR ROTATION BY FITTING ELLIPSE ###\n rotation = get_ellipse_rotation(image, contour)\n\n ### DRAW CONTOUR ###\n # Draw white circle at center of contour\n cv2.circle(image, (cx, cy), 6, (255, 255, 255))\n\n # Draw contour in green\n cv2.drawContours(image, [contour], 0, (0, 200, 0), 1)\n\n # Append important info to array\n valid_contours.append({\"cx\": cx, \"cy\": cy, \"rotation\": rotation})\n\n # Sort array based on coordinates (left to right) to make sure contours are adjacent\n valid_contours.sort(key=lambda contour: contour[\"cx\"])\n\n # Find targets from contours\n for i in range(len(valid_contours) - 1):\n # Check rotation of adjacent contours\n tilt_left = valid_contours[i][\"rotation\"]\n tilt_right = valid_contours[i + 1][\"rotation\"]\n\n # Contour coordinates\n cx_left = valid_contours[i][\"cx\"]\n cx_right = valid_contours[i + 1][\"cx\"]\n cy_left = valid_contours[i][\"cy\"]\n cy_right = valid_contours[i + 1][\"cy\"]\n\n # If contour angles are opposite\n # Negative tilt -> Rotated to the right\n # NOTE: if using rotated rect (min area rectangle), negative tilt means rotated to left\n # If left contour rotation is tilted to the left then skip iteration\n # If right contour rotation is tilted to the right then skip iteration\n if (len(valid_contours) == 2) or (np.sign(tilt_left) != np.sign(tilt_right) and\n not (tilt_left > 0 and cx_left < cx_right or tilt_right > 0 and 
cx_right < cx_left)):\n\n target_cx = (cx_left + cx_right) / 2\n target_cy = (cy_left + cy_right) / 2\n\n target_yaw = calculate_yaw(target_cx, center_x)\n target_pitch = calculate_pitch(target_cy, center_y)\n\n targets.append({\"cx\": target_cx,\n \"cy\": target_cy,\n \"yaw\": target_yaw,\n \"pitch\": target_pitch})\n\n # Check if there are targets seen\n if len(targets) > 0:\n # Get target with smallest yaw\n nearest_target = min(targets, key=lambda target: math.fabs(target[\"yaw\"]))\n # Write yaw of target in corner of image\n cv2.putText(image, \"Yaw: %.3f\" % nearest_target[\"yaw\"], (1, 12), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n # Draw line at center of target\n cv2.line(image, (int(nearest_target[\"cx\"]), screen_height), (int(nearest_target[\"cx\"]), 0), (255, 0, 0), 1)\n # Draw line at center of screen\n cv2.line(image, (round(center_x), screen_height), (round(center_x), 0), (255, 255, 255), 1)\n\n # Send our final data to NetworkTables\n table.putBoolean(\"target_present\", True)\n table.putNumber(\"targets_seen\", len(targets))\n table.putNumber(\"target_yaw\", nearest_target[\"yaw\"])\n table.putNumber(\"target_pitch\", nearest_target[\"pitch\"])\n else:\n table.putBoolean(\"target_present\", False)\n table.putNumber(\"targets_seen\", 0)\n table.putNumber(\"target_yaw\", 0)\n table.putNumber(\"target_pitch\", 0)\n table.putNumber(\"target_distance\", 0)\n\n return image", "def get_boxes(found, contours):\n boxes = []\n for i in found:\n rect = cv2.minAreaRect(contours[i])\n box = np.int0(cv2.cv.BoxPoints(rect))\n box = map(tuple, box)\n boxes.append(box)\n return boxes", "def analyzeContours(contours):\n if len(contours[0]) != 4 and len(contours[0]) != 8:\n return 0\n\n first, second, prev = [1, 1], [1, 1], 0\n for i, val in enumerate(numpy.nditer(contours[0])):\n if i % 2:\n second = [prev, val]\n else:\n first = [prev, val]\n\n # compare pair to the previous pair\n for num in first:\n if num in second:\n break\n else:\n return 0\n\n prev = val\n\n return 1", "def find_cards(thresh_image):\n\n # Find contours and sort their indices by contour size\n cnts,hier = cv2.findContours(thresh_image,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n index_sort = sorted(range(len(cnts)), key=lambda i : cv2.contourArea(cnts[i]),reverse=True)\n\n print(\"Contour length\", len(cnts))\n\n # If there are no contours, do nothing\n if len(cnts) == 0:\n return [], []\n\n # Otherwise, initialize empty sorted contour and hierarchy lists\n cnts_sort = []\n hier_sort = []\n cnt_is_card = np.zeros(len(cnts),dtype=int)\n\n # Fill empty lists with sorted contour and sorted hierarchy. Now,\n # the indices of the contour list still correspond with those of\n # the hierarchy list. 
The hierarchy array can be used to check if\n # the contours have parents or not.\n for i in index_sort:\n cnts_sort.append(cnts[i])\n hier_sort.append(hier[0][i])\n\n # Determine which of the contours are cards by applying the\n # following criteria: 1) Smaller area than the maximum card size,\n # 2), bigger area than the minimum card size, 3) have no parents,\n # and 4) have four corners\n\n for i in range(len(cnts_sort)):\n size = cv2.contourArea(cnts_sort[i])\n peri = cv2.arcLength(cnts_sort[i],True)\n approx = cv2.approxPolyDP(cnts_sort[i],0.01*peri,True)\n\n # (size < CARD_MAX_AREA) and (size > CARD_MIN_AREA)\n # and\n # and (hier_sort[i][3] == -1)\n # and (len(approx) == 4)\n if ((size > CARD_MIN_AREA)):\n print('[inside loop]',size)\n cnt_is_card[i] = 1\n\n return cnts_sort, cnt_is_card", "def find_rects(image: np.ndarray) -> List[np.ndarray]:\n\n gray = cv.cvtColor(image.copy(), cv.COLOR_RGB2GRAY)\n gray = cv.GaussianBlur(gray, (5, 5), 0)\n #edged = cv.Canny(gray, 70, 180) # this numbers is hand picked guess from a few photos\n edged = auto_canny(gray)\n\n contours = cv.findContours(edged.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = contours[0] if len(contours) == 2 else contours[1]\n contours.sort(key=cv.contourArea, reverse=True)\n contours = [cnt for cnt in contours if cv.contourArea(cnt) > 15] # 15px contour area, basically cnt>=4x4\n\n rects = list(map(cv.minAreaRect, contours))\n boxes = list(map(lambda r: Rect.from_cvrect(*r[0], *r[1]), rects))\n\n boxes = Rect.nms_merge(boxes)\n\n return boxes or list()", "def find_joints(contours, vertical, horizontal):\n\tjoints = np.multiply(vertical, horizontal)\n\ttables = {}\n\tfor c in contours:\n\t\tx, y, w, h = c\n\t\troi = joints[y : y + h, x : x + w]\n\t\ttry:\n\t\t\t__, jc, __ = cv2.findContours(\n\t\t\t\troi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE\n\t\t\t)\n\t\texcept ValueError:\n\t\t\t# for opencv backward compatibility\n\t\t\tjc, __ = cv2.findContours(\n\t\t\t\troi.astype(np.uint8), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE\n\t\t\t)\n\t\tif len(jc) <= 4: # remove contours with less than 4 joints\n\t\t\tcontinue\n\t\tjoint_coords = []\n\t\tfor j in jc:\n\t\t\tjx, jy, jw, jh = cv2.boundingRect(j)\n\t\t\tc1, c2 = x + (2 * jx + jw) // 2, y + (2 * jy + jh) // 2\n\t\t\tjoint_coords.append((c1, c2))\n\t\ttables[(x, y + h, x + w, y)] = joint_coords\n\n\treturn tables", "def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,\n min_height, max_height, solidity, max_vertex_count, min_vertex_count,\n min_ratio, max_ratio):\n output = []\n for contour in input_contours:\n x,y,w,h = cv2.boundingRect(contour)\n if (w < min_width or w > max_width):\n continue\n if (h < min_height or h > max_height):\n continue\n area = cv2.contourArea(contour)\n if (area < min_area):\n continue\n if (cv2.arcLength(contour, True) < min_perimeter):\n continue\n hull = cv2.convexHull(contour)\n solid = 100 * area / max(cv2.contourArea(hull),1)\n if (solid < solidity[0] or solid > solidity[1]):\n continue\n if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):\n continue\n ratio = (float)(w) / h\n if (ratio < min_ratio or ratio > max_ratio):\n continue\n output.append(contour)\n return output", "def splitContours(contours):\n split_contours = []\n for contour in contours:\n c = contour.reshape(-1, 2)\n line_segments = splitLine(c)\n for seg in line_segments:\n # Turn it back to its original shape, so we can add it back to contours\n new_contour = seg.reshape(-1,1,2)\n # Dont add 
empty contours to our new list of contours\n if new_contour.size != 0:\n split_contours.append(new_contour)\n\n return split_contours", "def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,\n min_height, max_height, solidity, max_vertex_count, min_vertex_count,\n min_ratio, max_ratio):\n output = []\n for contour in input_contours:\n x, y, w, h = cv2.boundingRect(contour)\n if (w < min_width or w > max_width):\n continue\n if (h < min_height or h > max_height):\n continue\n area = cv2.contourArea(contour)\n if (area < min_area):\n continue\n if (cv2.arcLength(contour, True) < min_perimeter):\n continue\n hull = cv2.convexHull(contour)\n solid = 100 * area / cv2.contourArea(hull)\n if (solid < solidity[0] or solid > solidity[1]):\n continue\n if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):\n continue\n ratio = (float)(w) / h\n if (ratio < min_ratio or ratio > max_ratio):\n continue\n output.append(contour)\n return output", "def find_contours(vertical, horizontal):\n\tmask = vertical + horizontal\n\n\ttry:\n\t\t__, contours, __ = cv2.findContours(\n\t\t\tmask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\texcept ValueError:\n\t\t# for opencv backward compatibility\n\t\tcontours, __ = cv2.findContours(\n\t\t\tmask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n\t\t)\n\t# sort in reverse based on contour area and use first 10 contours\n\tcontours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]\n\n\tcont = []\n\tfor c in contours:\n\t\tc_poly = cv2.approxPolyDP(c, 3, True)\n\t\tx, y, w, h = cv2.boundingRect(c_poly)\n\t\tcont.append((x, y, w, h))\n\treturn cont", "def get_contours_points(found, contours):\n contours_points = []\n for i in found:\n c = contours[i]\n for sublist in c:\n for p in sublist:\n contours_points.append(p)\n return contours_points", "def triPoints(rect, orientation): \r\n p1 = (rect.center[0]+orientation[0]*rect.size[0]/3.,\r\n rect.center[1]+orientation[1]*rect.size[1]/3.)\r\n p2 = (rect.center[0]-orientation[0]*rect.size[0]/4.,\r\n rect.center[1]-orientation[1]*rect.size[1]/4.)\r\n orthdir = (orientation[1], -orientation[0])\r\n p2a = (p2[0]-orthdir[0]*rect.size[0]/6.,\r\n p2[1]-orthdir[1]*rect.size[1]/6.)\r\n p2b = (p2[0]+orthdir[0]*rect.size[0]/6.,\r\n p2[1]+orthdir[1]*rect.size[1]/6.) \r\n return [(p[0], p[1]) for p in [p1, p2a, p2b]]", "def generate_question_contours(contours):\n \t# loop over the contours\n \tquestionContours = []\n \tfor contour in contours:\n \t\t# compute the bounding box of the contour, then use the\n \t\t# bounding box to derive the aspect ratio\n \t\t# boundingRect fits a straight (non-rotated) rectangle that bounds the countour\n \t\t# (x,y) is the top-left coordinate of the rectangle and (w,h) are its width and height\n \t\t(x, y, w, h) = cv2.boundingRect(contour)\n \t\tar = w / float(h)\n \t\t# taking the area of our thresholds will help us eliminate the numbers\n \t\t# in front of the bubbles like '10' and '6'\n \t\tarea = cv2.contourArea(contour)\n \t\t# area is crucial to get the right contours. 
You will probably need to adjust these parameters\n \t\t# my photos were taken from about 1.5 feet above the piece of paper, with the paper taking up\n \t\t# most of the photo\n \t\tif w >= 11 and w <= 20 and h >= 11 and h <= 20 and ar >= 0.6 and ar <= 1.34 and area >= 150:\n \t\t\tquestionContours.append(contour)\n \treturn questionContours", "def get_panels(rectangles):\n\n pairs = []\n for rect in rectangles:\n if (2 * rect[1][0] < rect[1][1]) or (rect[1][0] > 2 * rect[1][1]):\n if 2 * rect[1][0] < rect[1][1]:\n long_dim1 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim1 = 1\n\n box = cv2.boxPoints(rect)\n box2 = []\n min_angle = 10;\n for rect2 in rectangles:\n if 2 * rect[1][0] < rect[1][1]:\n long_dim2 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim2 = 1\n if (rect2 != rect) and (abs(rect[2] - rect2[2]) < min_angle) and (long_dim1 == 1 and long_dim2 == 1):\n box2 = cv2.boxPoints(rect2)\n min_angle = abs(rect[2] - rect2[2])\n\n if len(box2) != 0:\n box_pair = (box, box2)\n pairs.append(box_pair)\n\n return pairs", "def get_bricks(self, contours):\n bricks = []\n for cnt in contours:\n epsilon = 0.04*cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,epsilon,True) \n \n if len(approx) >= 4:\n rect = cv2.minAreaRect(approx)\n area = cv2.contourArea(approx)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n\n if area > 600 and area < 4000:\n\n brick = Brick()\n area = np.int0(area)\n center = np.int0(rect[0])\n angle = np.int0(rect[2])\n\n brick.set_area(area)\n brick.set_center(center)\n brick.set_angle(angle)\n brick.set_box(box)\n\n bricks.append(brick)\n\n # elif len(approx) > 4:\n # (x,y),radius = cv2.minEnclosingCircle(cnt)\n # center = (int(x),int(y))\n # radius = int(radius)\n # area = radius*radius*math.pi\n\n # if area > 600 and area < 2000:\n\n # brick = Brick()\n # area = np.int0(area)\n \n # brick.set_area(area)\n # brick.set_center(center)\n # brick.set_radius(radius)\n\n # bricks.append(brick)\n\n \n \n return bricks", "def get_fits(holes, triangles):\n fits = []\n for hole in holes:\n hole_fits = []\n\n for i1, t1 in enumerate(triangles):\n for i2, t2 in enumerate(triangles[i1 + 1:], i1 + 1):\n if hole in add_triangles(t1, t2):\n hole_fits.append((i1, i2))\n\n fits.append(hole_fits)\n\n return fits", "def get_position_patterns(contours, hierarchy):\n found = []\n for i in range(len(contours)):\n k = i\n c = 0\n while hierarchy[k][2] != -1:\n k = hierarchy[k][2]\n c += 1\n if c >= 5:\n found.append(i)\n return found", "def findSquares(img,minSize = 2000,maxAngle = 1):\n squares = []\n contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n for cnt in contours:\n cnt_len = cv2.arcLength(cnt, True)\n cnt = cv2.approxPolyDP(cnt, 0.08*cnt_len, True)\n if len(cnt) == 4 and cv2.contourArea(cnt) > minSize and cv2.isContourConvex(cnt):\n cnt = cnt.reshape(-1, 2)\n max_cos = np.max([angle_cos( cnt[i], cnt[(i+1) % 4], cnt[(i+2) % 4] ) for i in xrange(4)])\n if max_cos < maxAngle:\n squares.append(cnt)\n return squares", "def find_tape():\n\n _, frame = CAP.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, color_lower, color_upper)\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Find all valid pair rects, and reutrn if none found\n pair_rects = get_pair_rects(contours)\n if len(pair_rects) == 0:\n return\n\n # If found, continue on and post results\n center = closest_center(pair_rects)\n\n to_send = '{}:{}\\n'.format(\n round(time.time(), 3), 
round(degrees(horizontal_angle(center[0])), 3))\n print(to_send)\n s.send(bytearray(to_send, 'utf-8'))", "def getContours(img,iteration):\n nP, IDrange = upDate(iteration)\n imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n retvalth, imgthreshold = cv2.threshold(imgray, 50, 255, cv2.THRESH_BINARY)\n kernel = np.ones((nP, nP), np.uint8)\n imgdilation = cv2.dilate(imgthreshold, kernel, iterations=2)\n contours = []\n # two vertion of cv2 for findcontours-> (old vertion): imgcontours, contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n #contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n if iteration == 2 :\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n elif iteration == 3:\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n else:\n contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n ##imgcontours, contours, hierarchy = cv2.findContours(imgdilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n return contours", "def GetSubContoursByFrame(watershed, allValsByFrame):\n scListByFrame = []\n for frame in range(len(watershed)):\n scList = []\n for v in allValsByFrame[frame]:\n boundingRect = ImageContour.GetBoundingRect(watershed[frame], v)\n # No longer needed: #contour,turns,vals = ImageContour.GetContour(watershed[0],v,boundingRect=boundingRect,byNeighbor=True)\n (\n perimeterVals,\n perimeterList,\n scPoints,\n ) = ImageContour.GetPerimeterByNeighborVal(\n watershed[frame], v, boundingRect=boundingRect, getSubContours=True\n )\n scPointsAdj = [\n (np.array(scp) + [boundingRect[0][0], boundingRect[1][0]]).tolist()\n for scp in scPoints\n ] # Will need to - 0.5 to line up on an overlay\n if len(perimeterList) > 0:\n scList += [\n SubContour(\n points=scPointsAdj[i],\n numPoints=len(scPointsAdj[i]),\n adjusted_length=perimeterList[i],\n values=tuple(sorted([v, perimeterVals[i]])),\n startPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][0]\n ),\n endPointValues=GetValuesAroundSCPoint(\n watershed[frame], scPointsAdj[i][-1]\n ),\n )\n for i in range(len(perimeterVals))\n ]\n scList.sort(key=lambda x: x.values)\n for i in range(len(scList) - 1, 0, -1):\n # if 2 subcoutours are the same, keep only the one with the minimum length computation\n if scList[i - 1].values == scList[i].values:\n scList[i - 1].adjusted_length = min(\n scList[i - 1].adjusted_length, scList[i].adjusted_length\n )\n del scList[i]\n scListByFrame.append(scList)\n return scListByFrame", "def register_cells(contours: List[np.array]) -> np.array:\n def create_cell(contour: np.array) -> np.array:\n center, radius = cv2.minEnclosingCircle(contour)\n v = (0, 0)\n weight = -1\n\n return np.array([center[0], center[1], v[0], v[1], radius, weight])\n\n areas = np.array([cv2.contourArea(contour) for contour in contours]).astype(\"float32\")\n blobs = np.array([create_cell(contour) for contour in contours]).astype(\"int32\")\n\n return blobs[areas >= 100].astype(\"float32\")", "def pairs_from_list(lights):\n length = len(lights)\n half = int(length / 2)\n offset = 0\n\n centre = None\n if length % 2 == 1:\n centre = lights[half]\n offset = 1\n\n left = lights[:half]\n\n rh_start = half + offset\n right = reversed(lights[rh_start:])\n\n pairs = list(map(list, zip(left, right)))\n\n if centre:\n pairs.append([centre])\n\n return pairs", "def draw_all_position_patterns(img, found, contours):\n draw_img 
= img.copy()\n for i in found:\n rect = cv2.minAreaRect(contours[i])\n box = np.int0(cv2.cv.BoxPoints(rect))\n cv2.drawContours(draw_img, [box], 0, (0, 0, 255), 2)\n show(draw_img)", "def recognize_pieces(edges, v, squares):\n\n pieces = []\n\n v = cv2.equalizeHist(v)\n for p1, p2 in squares:\n # count the number of slightly centered edges\n occupancy = sum(edges[y][x]\n for x in range(p1.x + 5, p2.x - 5)\n for y in range(p1.y + 5, p2.y - 5))\n\n if occupancy > 70*255:\n corners = (v[p1.y][p1.x], v[p1.y][p2.x],\n v[p2.y][p1.x], v[p2.y][p2.x])\n\n # average v-component of the corners\n avg = sum(map(float, corners)) / len(corners)\n\n # black pixels should be relatively black\n # when compared to the corner average\n black = sum(v[y][x] / avg < 0.2\n for x in range(p1.x, p2.x + 1)\n for y in range(p1.y, p2.y + 1))\n\n if black >= 1000 and black != 1049:\n color = \"B\"\n else:\n color = \"W\"\n\n pieces.append(color)\n else:\n pieces.append(None)\n\n return pieces", "def instance_contours(gti):\n # TODO: move to somewhere better\n import cv2\n\n rc_locs = np.where(gti > 0)\n grouped_cc_rcs = util.group_items(\n np.ascontiguousarray(np.vstack(rc_locs).T),\n gti[rc_locs], axis=0\n )\n\n def bounding_box(rcs):\n rc1 = rcs.min(axis=0)\n rc2 = rcs.max(axis=0)\n return rc1, rc2\n\n # slice out a bounding region around each instance, detect the contour and\n # then offset it back into image coordinates\n grouped_contours = {}\n for label, rcs in grouped_cc_rcs.items():\n rc1, rc2 = bounding_box(rcs)\n sl = (slice(rc1[0], rc2[0] + 2), slice(rc1[1], rc2[1] + 2))\n submask = (gti[sl] == label).astype(np.uint8)\n\n xy_offset = rc1[::-1]\n offset = xy_offset + [-2, -2]\n\n border = cv2.copyMakeBorder(submask, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=0 )\n _, contors, hierarchy = cv2.findContours(border, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE,\n offset=tuple(offset))\n \"\"\"\n offset = [0, 0]\n BGR_GREEN = (0, 255, 0)\n x = np.ascontiguousarray(util.ensure_alpha_channel(submask)[:, :, 0:3]).astype(np.uint8)\n draw_img = cv2.drawContours(\n image=x, contours=contors,\n contourIdx=-1, color=BGR_GREEN, thickness=2)\n \"\"\"\n # note when len(contours > 1, there is a hole in the building)\n # assert len(contors) == 1\n grouped_contours[label] = contors\n return grouped_contours", "def bbox_from_circle(img, circles):\n seg_imgs = []\n bboxes = []\n aux = img.copy()\n for i,el in enumerate(circles):\n bbox = circle_2_bbox(el['coord'])\n bbox = fix_bbox(bbox,aux.shape)\n cv.rectangle(aux,bbox[0],bbox[1],(0,255,0))\n bboxes.append(bbox)\n return bboxes", "def get_isosceles_trapezoid_mask_vertices(\n m, n,\n x_proportion_bottom=1 / 10,\n x_proportion_top=4 / 10,\n y_proportion_bottom=1,\n y_proportion_top=3/5\n):\n bottom_y = int(m * y_proportion_bottom)\n top_y = int(m * y_proportion_top)\n\n left_lane_bottom_left_pt = (int(n * x_proportion_bottom), bottom_y)\n left_lane_top_left_pt = (int(n * x_proportion_top), top_y)\n\n right_lane_bottom_right_pt = (n - int(n * x_proportion_bottom), bottom_y)\n right_lane_top_right_pt = (n - int(n * x_proportion_top), top_y)\n\n # list(left_lane_top_left_pt),\n # list(right_lane_top_right_pt),\n # list(right_lane_bottom_right_pt),\n # list(left_lane_bottom_left_pt)\n return [left_lane_top_left_pt, right_lane_top_right_pt, right_lane_bottom_right_pt, left_lane_bottom_left_pt]\n\n # return [left_lane_bottom_left_pt, left_lane_top_left_pt, right_lane_bottom_right_pt, right_lane_top_right_pt]" ]
[ "0.6538606", "0.6144932", "0.6041295", "0.5940763", "0.57794034", "0.57555974", "0.5708939", "0.5644752", "0.56134313", "0.56101936", "0.55749595", "0.5529742", "0.5510864", "0.54971457", "0.5470397", "0.54460466", "0.54439086", "0.54020983", "0.5390673", "0.5384446", "0.5359485", "0.53547096", "0.5304629", "0.5230829", "0.52250034", "0.5223473", "0.51871073", "0.51807356", "0.5179046", "0.51636523" ]
0.77581185
0
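Editorial aside between rows: a pairing routine like the get_pair_rects document above is normally driven by a thresholding step. The sketch below is a hypothetical harness, not part of the dataset row; the HSV bounds, the ANGLE_TOLERANCE_DEG value, and the OpenCV 4.x two-value findContours return are all assumptions.

import cv2
import numpy as np

# Assumed tuning constants; the row above only implies that they exist.
ANGLE_TOLERANCE_DEG = 10.0
HSV_LOWER = np.array([55, 100, 100])   # hypothetical lower bound for green tape
HSV_UPPER = np.array([95, 255, 255])   # hypothetical upper bound for green tape

def pair_rects_from_frame(frame):
    """Threshold a BGR frame and hand the contours to get_pair_rects."""
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, HSV_LOWER, HSV_UPPER)
    # OpenCV 4.x signature assumed: (contours, hierarchy).
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # get_pair_rects is the function stored in the document field above.
    return get_pair_rects(contours)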
Find all of the pair centers given a list of pairs of rotated rectangles. The function iterates through the list of pairs of rotated rectangles, storing the center of each pair in a list to be returned, so that the index of each center matches the index of its pair in rect_pairs. If the rect_pairs list is empty, an empty list of centers is returned.
def find_pair_centers(rect_pairs): centers = [] for rect1, rect2 in rect_pairs: center = midpoint(rect1[0], rect2[0]) centers.append(center) return centers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pair_rects(contours):\n\n rect_pairs = []\n for index, cnt in enumerate(contours):\n # Rotated rect - ( center (x,y), (width, height), angle of rotation )\n rect = cv2.minAreaRect(cnt)\n center_x, center_y = rect[0]\n rect_angle = -round(rect[2], 2)\n\n if rect_angle > 45.0:\n # Iterate through all of the potential matches\n min_x_dist = min_rect = min_index = None\n for pot_index, pot_match in enumerate(contours):\n if np.array_equal(pot_match, cnt):\n continue\n\n match_rect = cv2.minAreaRect(pot_match)\n\n # Check if match is to the right of the contour\n if match_rect[0][0] > rect[0][0] and abs(\n match_rect[2] - rect_angle) > ANGLE_TOLERANCE_DEG:\n x_distance = match_rect[0][0] - rect[0][0]\n\n if min_x_dist is None or x_distance < min_x_dist:\n min_x_dist = x_distance\n min_rect = match_rect\n min_index = pot_index\n\n if min_rect is not None:\n rect_pairs.append((rect, min_rect))\n np.delete(contours, index)\n np.delete(contours, min_index)\n\n return rect_pairs", "def closest_center(rect_pairs):\n centers = find_pair_centers(rect_pairs)\n\n min_dist = min_center = None\n for center in centers:\n dist = distance(center, FRAME_CENTER)\n if min_dist is None or dist < min_dist:\n min_dist = dist\n min_center = center\n\n return min_center", "def make_bounding_box_vectors(coord_pairs):\n\n # indexing multi-d arrays by multi-d arrays is a bit tricky\n # need to be explicit\n # this was helpful: http://bit.ly/1BcSm5y\n coord_pair_grid = np.mgrid[[slice(x) for x in coord_pairs.shape]]\n i0 = coord_pair_grid[0]\n # order by the 2nd axis of the input coord pairs\n # (i.e. compare lon to lon, lat to lat)\n i1 = np.argsort(coord_pairs, axis=1)\n i2 = coord_pair_grid[2]\n\n return coord_pairs[i0, i1, i2]", "def computeIndices(org_center:tuple, rot_center:tuple, xyPairs, angle, org_box_offset:tuple):\n rot_center = np.array([[rot_center[0]], [rot_center[1]]])\n org_center = np.array([[org_center[0]], [org_center[1]]])\n org_box_offset = np.array([[org_box_offset[0]], [org_box_offset[1]]])\n newPairs = xyPairs - rot_center\n\n rotMatrix = buildRotationMatrix(angle)\n\n orgPairs = np.matmul(rotMatrix, newPairs)\n\n return np.array(orgPairs) + org_center + org_box_offset", "def pairs_from_list(lights):\n length = len(lights)\n half = int(length / 2)\n offset = 0\n\n centre = None\n if length % 2 == 1:\n centre = lights[half]\n offset = 1\n\n left = lights[:half]\n\n rh_start = half + offset\n right = reversed(lights[rh_start:])\n\n pairs = list(map(list, zip(left, right)))\n\n if centre:\n pairs.append([centre])\n\n return pairs", "def get_circles_centers(triangles):\n points1, points2, points3 = (triangles[:, 0],\n triangles[:, 1],\n triangles[:, 2])\n # Vectors\n sides1 = points2 - points1\n sides2 = points3 - points1\n # Length of vector of cross product * 2\n area = 2 * (sides1[:, 0] * sides2[:, 1] - sides1[:, 1] * sides2[:, 0])\n\n # (y_2(x_1^2 + y_1^2) - y_1(x_2^2 + y_2^2)) / area + x\n centers_x = ((sides2[:, 1] *\n (np.square(sides1[:, 0]) + np.square(sides1[:, 1])) -\n sides1[:, 1] *\n (np.square(sides2[:, 0]) + np.square(sides2[:, 1]))) /\n area + points1[:, 0])\n centers_y = ((sides1[:, 0] *\n (np.square(sides2[:, 0]) + np.square(sides2[:, 1])) -\n sides2[:, 0] *\n (np.square(sides1[:, 0]) + np.square(sides1[:, 1]))) /\n area + points1[:, 1])\n\n # Transportated.\n return np.array((centers_x, centers_y)).T", "def find_matches_for_start_pairs(pairs, adj_list_ids, adj_list_words):\n all_valid = []\n for p in tqdm(pairs):\n valid = find_matches(p[0], p[1], adj_list_ids, 
adj_list_words)\n if valid:\n all_valid.append((p, valid))\n return all_valid", "def closest_pair_strip(cluster_list, horiz_center, half_width):\n indices = [] \n\n # Let S be a list of the set {i: |xi - mid| < half_width}\n indices = [index for index in range(len(cluster_list)) if abs(cluster_list[index].horiz_center()-horiz_center)<=half_width]\n \n\t# Sort the indices in S in nondecreasing order of the vertial (y) coordinates of their associated points;\n indices.sort(key = lambda index: cluster_list[index].vert_center())\n\n cl_sorted = cluster_list[:]\n cl_sorted.sort(key = lambda cluster: cluster.vert_center())\n\n # k <-- length(S);\n len_indices = len(indices)\n\n # (d,i,j) <-- (infinity, -1, -1);\n output_list = [float('inf'), -1, -1]\t\n\n # for u <-- 0 to k - 2 do\n for value1 in range(len_indices-1):\n \t# \tfor v <-- u + 1 to min{u+3,k-1} do\n for value2 in range(value1+1, min(value1+6,len_indices)):\n # (d,i,j) <-- min{(d,i,j),(dsub(psubS[u],PsubS[v]), S[u],S[v])}\n min_dist = min(output_list[0], pair_distance(cluster_list,indices[value1],indices[value2])[0])\n if output_list[0] > min_dist:\n indices_sorted = sorted([indices[value1], indices[value2]])\n \toutput_list = [pair_distance(cluster_list,indices[value1],indices[value2])[0], indices_sorted[0], indices_sorted[1]]\n\n # return(d,i,j)\n return tuple(output_list)", "def centers(self):\n def make_centers(x):\n return 0.25 * (x[:-1, :-1] + x[:-1, 1:] + x[1:, :-1] + x[1:, 1:])\n\n polar_centers = make_centers(self.polar_corners)\n azimuthal_centers = make_centers(self.azimuthal_corners)\n\n assert azimuthal_centers.shape == polar_centers.shape\n return polar_centers, azimuthal_centers", "def horizontal_pairings(mat):\n\tw, h = mat.shape\n\tx = mat[:,:-1]\n\ty = mat[:, 1:]\n\t\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(w):\n\t\tfor j in range(h-1):\n\t\t\tx_cor_list.append(x[i, j])\n\t\t\ty_cor_list.append(y[i, j])\n\n\treturn x_cor_list, y_cor_list", "def centroids2(img_segm,liste):\n m = len(liste)\n xs = np.zeros(m)\n ys = np.zeros(m)\n j=0\n for elt in liste:\n pos_list = np.where(img_segm==elt)\n xs[j] = np.mean(pos_list[0])\n ys[j] = np.mean(pos_list[1])\n j+=1\n return xs,ys", "def centers_and_regions(data, x, y, xmin, xmax):\n # starting at (x, y), using extents xmin\n # first hole is not duplicated, so it is treated differently\n width, gap = data[0]\n result = []\n result.append(((x, y), region_points(x, y, width, xmin, xmax)))\n y = width / 2 + gap\n for width, gap in data[1:]:\n y += width / 2\n result.append(((x, y), region_points(x, y, width, xmin, xmax)))\n result.insert(0, ((x, -y), region_points(x, -y, width, xmin, xmax)))\n y += width / 2 + gap\n return result", "def closest_pair_strip(cluster_list, horiz_center, half_width):\n\n # compute list of indices for the clusters ordered in the horizontal direction\n strip_and_index = [(cluster_list[idx], idx)\n for idx in range(len(cluster_list)) \n if abs(cluster_list[idx].horiz_center() - horiz_center) < half_width]\n #print 'strip_and_index:', strip_and_index\n #hcoord_and_index.sort()\n # print hcoord_and_index\n #horiz_order = [hcoord_and_index[idx][1]\n # for idx in range(len(hcoord_and_index))]\n \n # compute list of indices for the clusters ordered in vertical direction\n vcoord_and_index = [(strip_and_index[idx][0].vert_center(), idx)\n for idx in range(len(strip_and_index))]\n vcoord_and_index.sort()\n vert_order = [strip_and_index[vcoord_and_index[idx][1]][1]\n for idx in range(len(vcoord_and_index))] \n #print 'vert_order:', vert_order \n 
\n dij = (float('inf'), -1, -1)\n for udx in range(len(vert_order) - 1): # len - 2?\n start = udx + 1\n stop = min(udx + 4, len(vert_order))\n for vdx in range(start, stop):\n dist = pair_distance(cluster_list, vert_order[udx], vert_order[vdx])\n #print 'udx, vdx:', udx, vdx, 'dist:', dist\n if dist[0] < dij[0]:\n dij = dist\n \n return dij", "def __get_centers(data, clusters):\n\n centers = [[] for i in range(len(clusters))]\n dimension = len(data[0])\n\n for index in range(len(clusters)):\n point_sum = [0.0] * dimension\n\n for index_point in clusters[index]:\n point_sum = list_math_addition(point_sum, data[index_point])\n\n centers[index] = list_math_division_number(point_sum, len(clusters[index]))\n\n return centers", "def __find_similar_pairs(self):\n size = len(self.__indexclusters)\n candidates = []\n for i in range(size):\n for j in range(i+1, size):\n simi = self.__cluster_simi(i, j)\n #print simi, self.__indexclusters[i],self.__indexclusters[j]\n if simi >= self.__threshold:\n candidates.append((simi, i, j))\n candidates.sort(reverse = True, key = lambda x: x[0])\n\n\n # filter overlapped pairs\n to_remove = set()\n appeared = set()\n for index, cand in enumerate(candidates):\n if cand[1] not in appeared and cand[2] not in appeared:\n appeared.add(cand[1])\n appeared.add(cand[2])\n else:\n to_remove.add(index)\n\n #print 'ahha'\n #print [(cand[1], cand[2]) for index, cand in enumerate(candidates) if index not in to_remove]\n\n return [(cand[1], cand[2]) for index, cand in enumerate(candidates)\n if index not in to_remove]", "def closest_pair_strip(cluster_list, horiz_center, half_width):\n new_list = [cluster_list[idx].copy() for idx in range(len(cluster_list))]\n new_list.sort(key = lambda cluster: cluster.vert_center())\n indices = []\n for idx in range(len(new_list)):\n if abs(new_list[idx].horiz_center() - horiz_center) < half_width:\n indices.append(idx)\n dist, idx1, idx2 = float(\"inf\"), -1, -1\n total_indices = len(indices)\n if total_indices > 0:\n for idx_u in range(total_indices - 1):\n for idx_v in range(idx_u + 1, min(idx_u + 4, total_indices)):\n dist, idx1, idx2 = min((dist, idx1, idx2), \n pair_distance(new_list, indices[idx_u], \n indices[idx_v]))\n cluster1 = new_list[idx1]\n cluster2 = new_list[idx2]\n new_list.sort(key = lambda cluster: cluster.horiz_center())\n idx1 = min(new_list.index(cluster1), new_list.index(cluster2))\n idx2 = max(new_list.index(cluster1), new_list.index(cluster2))\n return (dist, idx1, idx2)", "def get_candidates(m, dicts, square_coords, naked_sets=None):\n starting_spots = get_starting_spots(m, dicts, square_coords)\n starting_spots.sort(key=itemgetter(2))\n rm, cm, sm = dicts\n c = {}\n for coordinate in starting_spots:\n row, col, missing = coordinate\n c[(row, col)] = [n for n in cm[col] if n in rm[row] and n in sm[square_coords[row, col]]]\n try:\n c[(row, col)] = [n for n in c[(row, col)] if n not in naked_sets[(row, col)]]\n except (KeyError, TypeError):\n continue\n return c", "def get_square_centers(self):\n x_values = np.arange(-2, 4, 2) * np.ones(self.GRID_SHAPE)\n y_values = np.arange(2, -4, -2).reshape((3, 1)) * np.ones(self.GRID_SHAPE)\n x_values *= self.spacing\n x_values += self.center[0] # add x-coordinate for grid center\n y_values *= self.spacing\n y_values += self.center[1] # add y-coordinate for grid center\n return np.dstack((x_values, y_values))", "def vertical_pairings(mat):\n\tw, h = mat.shape\n\tx = mat[:-1,:]\n\ty = mat[1:, :]\n\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(w-1):\n\t\tfor j in 
range(h):\n\t\t\tx_cor_list.append(x[i, j])\n\t\t\ty_cor_list.append(y[i, j])\n\n\treturn x_cor_list, y_cor_list", "def closest_pair_strip(cluster_list, horiz_center, half_width):\n # Let S be a list of the set{i:|xi−mid|< w}\n set_strip = []\n for cluster in cluster_list:\n if abs(cluster.horiz_center() - horiz_center) < half_width:\n set_strip.append(cluster)\n set_strip.sort(key=lambda cluster: cluster.vert_center())\n num = len(set_strip)\n dist, idx_i, idx_j = float(\"inf\"), -1, -1\n if num <= 1:\n return float(\"inf\"), -1, -1\n elif num < 3:\n for idx_u in range(num - 1):\n dist_u = set_strip[idx_u].distance(set_strip[idx_u + 1])\n idx_i = cluster_list.index(set_strip[idx_u])\n idx_j = cluster_list.index(set_strip[idx_u + 1])\n dist, idx_i, idx_j = min((dist, idx_i, idx_j), (dist_u, min(idx_i, idx_j), max(idx_i, idx_j)))\n return dist, idx_i, idx_j\n\n else:\n for idx_u in range(num - 1):\n for idx_v in range(idx_u + 1, min(idx_u + 3, num - 1) + 1):\n dist_uv = set_strip[idx_u].distance(set_strip[idx_v])\n idx_i_uv = cluster_list.index(set_strip[idx_u])\n idx_j_uv = cluster_list.index(set_strip[idx_v])\n dist, idx_i, idx_j = min((dist, idx_i, idx_j),\n (dist_uv, min(idx_i_uv, idx_j_uv), max(idx_i_uv, idx_j_uv)))\n return dist, idx_i, idx_j", "def get_all_possible_os_pairings(indices_list):\n pairs = []\n itr = 0\n\n for links in indices_list:\n\n for item in links:\n for i in range(itr,len(links)):\n\n if item == links[i]:\n continue\n else:\n pair = item, links[i]\n pairs.append(pair)\n return pairs", "def get_intersect(pair1, pair2):\n # calculate the homogeneous coords\n tmp = np.vstack((pair1, pair2))\n h = np.hstack((tmp, np.ones((4, 1))))\n\n # line through each pair of points\n l1 = np.cross(h[0], h[1])\n l2 = np.cross(h[2], h[3])\n\n # get the intersect\n x, y, z = np.cross(l1, l2)\n x /= z\n y /= z\n return x, y", "def _getTopCenterIndices(self, resolution, rectangular):\n # get x, y indices to get away from the ring basis.\n # indices starts with (0, 0) in the middle, with (r2, p1) -> (1, 0), etc. 
(x is on the pos 1 ray)\n\n numAxialLevels = 2 * resolution\n xi, yi = self.indices()\n if rectangular:\n topCenterI = 2 + (3 * resolution) * xi\n else:\n # 4*d b/c each increase in xi moves you back by numstacks/2\n topCenterI = 1 + (4 * resolution) * xi + (yi * numAxialLevels)\n topCenterJ = 1 + xi * numAxialLevels // 2 + numAxialLevels * yi\n return topCenterI, topCenterJ", "def _get_x_center_pts(halfway_x, halfway_y):\n return reduce(iconcat, _get_pt_tuple(range(1, halfway_x),\n range(1, halfway_y)))", "def get_candidate_locations(cur_location, radius, row_num, col_num):\n cur_y, cur_x = cur_location\n delta = int(radius)\n max_x = cur_x + delta if cur_x + delta < col_num else col_num - 1\n min_x = cur_x - delta if cur_x - delta >= 0 else 0\n max_y = cur_y + delta if cur_y + delta < row_num else row_num - 1\n min_y = cur_y - delta if cur_y - delta >= 0 else 0\n candidates = []\n for x in range(min_x, max_x + 1):\n for y in range(min_y, max_y + 1):\n if distance(cur_x, cur_y, x, y) < radius:\n candidates.append((y, x))\n return candidates", "def closest_pair_strip(cluster_list, horiz_center, half_width):\n # creating a list of the indexes of the clusters in the strip\n strip_points = [p_idx for p_idx in range(len(cluster_list)) if abs(cluster_list[p_idx].horiz_center() - horiz_center) <= half_width]\n strip_points.sort()\n\n # initializing variables\n points_in_strip = len(strip_points)\n closest_pair = (float('inf'), -1, -1)\n\n for u_idx in range(points_in_strip - 1):\n for v_idx in range(u_idx + 1, points_in_strip):\n closest_pair = min(closest_pair,\n pair_distance(cluster_list, strip_points[u_idx], strip_points[v_idx]))\n\n return closest_pair", "def unpruned_atom_pairs(\n molecules: List[masm.Molecule], idx_map: List[Tuple[int, int]], distance_bounds: Tuple[int, int]\n) -> Set[Tuple[int, int]]:\n\n def structure_idx(c: int, i: int) -> int:\n return idx_map.index((c, i))\n\n pairs: Set[Tuple[int, int]] = set()\n\n for component, molecule in enumerate(molecules):\n for i in molecule.graph.atoms():\n distances = np.array(masm.distance(i, molecule.graph))\n partners = np.nonzero((distances <= max(distance_bounds)) & (distances >= min(distance_bounds)))[0]\n\n # Back-transform to structure indices and add to set\n s_i = structure_idx(component, i)\n s_partners = [structure_idx(component, j) for j in partners]\n pairs |= set(make_sorted_pair(s_i, s_j) for s_j in s_partners)\n\n return pairs", "def generate_centers(self):\n\t\tcenters = []\n\t\tsize = self.config.image_size\n\t\tfor i in range(self.config.num_obj):\n\t\t\tflag = True\n\t\t\twhile flag:\n\t\t\t\tc = np.random.randint(int(size * 0.05), int(size * 0.95), 2)\n\t\t\t\tflag = False\n\t\t\t\tfor center in centers:\n\t\t\t\t\tif (abs(center[0] - c[0]) <= 0.1 * size) or (abs(center[1] - c[1]) <= 0.1 *size):\n\t\t\t\t\t\tflag = False\n\t\t\tcenters.append(c)\n\t\t\t\t\n\t\treturn centers", "def get_panels(rectangles):\n\n pairs = []\n for rect in rectangles:\n if (2 * rect[1][0] < rect[1][1]) or (rect[1][0] > 2 * rect[1][1]):\n if 2 * rect[1][0] < rect[1][1]:\n long_dim1 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim1 = 1\n\n box = cv2.boxPoints(rect)\n box2 = []\n min_angle = 10;\n for rect2 in rectangles:\n if 2 * rect[1][0] < rect[1][1]:\n long_dim2 = 0\n elif rect[1][0] > 2 * rect[1][1]:\n long_dim2 = 1\n if (rect2 != rect) and (abs(rect[2] - rect2[2]) < min_angle) and (long_dim1 == 1 and long_dim2 == 1):\n box2 = cv2.boxPoints(rect2)\n min_angle = abs(rect[2] - rect2[2])\n\n if len(box2) != 0:\n box_pair = (box, box2)\n 
pairs.append(box_pair)\n\n return pairs", "def _find_pairs(self):\n pairs = []\n for i, p in enumerate(self.frame_0):\n nearests = np.where(np.linalg.norm(self.frame_1 - p, axis=1) <= self.R_p)[0]\n # add probability missing pair.\n nearests = np.append(nearests, -1)\n prob = np.zeros_like(nearests) + 1.0 / nearests.shape[0]\n\n ind_prob = np.vstack([nearests, prob])\n\n pairs.append(ind_prob)\n\n return pairs" ]
[ "0.62026006", "0.6162701", "0.6087304", "0.59834564", "0.5873865", "0.5657451", "0.5628694", "0.56004316", "0.55566496", "0.54699785", "0.53669983", "0.53555965", "0.53241456", "0.53177077", "0.5304112", "0.52581257", "0.5243059", "0.5207223", "0.5129527", "0.5098375", "0.50661993", "0.5056622", "0.50476", "0.5044029", "0.5013273", "0.500882", "0.49883497", "0.498427", "0.49788007", "0.49787423" ]
0.8494008
0
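A quick, hedged sanity check of the row above: the rotated rects are hand-made tuples in OpenCV's ((cx, cy), (w, h), angle) layout, the values are illustrative only, and the expected output follows from the midpoint helper that appears in a later row of this dataset.

# Hand-made rotated rects; values are illustrative only.
left_rect = ((100.0, 120.0), (20.0, 50.0), -75.0)
right_rect = ((180.0, 118.0), (20.0, 50.0), -15.0)

centers = find_pair_centers([(left_rect, right_rect)])
# midpoint() truncates to ints, so this prints [(140, 119)].
print(centers)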
Gets the center of the rectangle pair closest to the center of the frame. Iterates through the rotated rectangle pairs, finding the rect pair that is closest to the center of the frame and then returning the center point of that rect pair, or None if the rect_pairs list is empty. This function, however, does not detect the center point of the frame itself, but uses the global FRAME_CENTER variable defined in this script.
def closest_center(rect_pairs): centers = find_pair_centers(rect_pairs) min_dist = min_center = None for center in centers: dist = distance(center, FRAME_CENTER) if min_dist is None or dist < min_dist: min_dist = dist min_center = center return min_center
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_pair_centers(rect_pairs):\n\n centers = []\n for rect1, rect2 in rect_pairs:\n center = midpoint(rect1[0], rect2[0])\n centers.append(center)\n\n return centers", "def get_rect_center(rect):\n x, y, w, h = rect\n return x + w/2, y + h/2", "def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]", "def find_center(r):\n cx=r.corner.x+(r.width/2)\n cy=r.corner.y+(r.height/2)\n return cx,cy", "def rectCenter(rect):\n return np.array([rect[:2].mean(), rect[2:].mean()])", "def find_center(self):\n return(Point(self.corner.x + self.width/2.0, self.corner.y + self.height/2.0))", "def get_center(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_center()", "def _get_bounding_box(self, frame, bounding_offset):\n\n # Try to find board if the boundingbox is not set\n center, ellipse, mask = self.board.detect(frame)\n\n # Should not be None\n if center is None:\n print(\"skipping frame\")\n return None\n if ellipse is None:\n print(\"skipping frame\")\n return None\n if mask is None:\n print(\"skipping frame\")\n return None\n\n self.point_mask = mask\n # cv2.imshow(\"mask\", mask)\n\n x_offset = (ellipse[1][0] / 2)\n x_center = ellipse[0][0]\n\n y_offset = ellipse[1][1] / 2\n y_center = ellipse[0][1]\n\n minx = max(0, x_center - x_offset - bounding_offset)\n maxx = min(self.width, x_center + x_offset + bounding_offset)\n miny = max(0, y_center - y_offset - bounding_offset)\n maxy = min(self.height, y_center + y_offset + bounding_offset)\n return ((int(minx), int(miny)), (int(maxx), int(maxy)))", "def _get_center_pos(self):\n if not hasattr(self, 'lon_center'):\n raise ValueError('ERROR: You need to specify first the center position!')\n d = np.abs((self.x.lon - self.lon_center) ** 2. + (self.x.lat - self.lat_center) ** 2.)\n dmin = d.min()\n m = d == dmin\n\n idx = np.indices(d.shape)\n i = idx[0][m][0]\n j = idx[1][m][0]\n\n if (np.abs(1. - self.x.lon[i, j] / self.lon_center) > 0.05) or (np.abs(1. 
- self.x.lat[i, j] / self.lat_center) > 0.05): # at least 5% acc.\n print 'lon: ', self.x.lon[i, j], self.lon_center\n print 'lat: ', self.x.lat[i, j], self.lat_center\n i = None\n j = None\n return i, j", "def get_center_position(matrix):\n\n\trow = len(matrix)\n\tcolumn = len(matrix[0])\n\tpossible_center = {}\n\tgrid_vals = []\n\tif row % 2 and column % 2:\n\t\tcenter = [row / 2, column / 2]\n\tif (not (row % 2)) and (not (column % 2)):\n\t\tpossible_center = {\n\t\t\t0: [row /2, column / 2],\n\t\t\t1: [row / 2 - 1, column / 2],\n\t\t\t2: [row / 2, column / 2 - 1],\n\t\t\t3: [row / 2 - 1, column / 2 - 1],\n\t\t}\n\t \n\t\tgrid_vals = [matrix[row /2][column / 2],\n\t\t\t\t\tmatrix[row / 2 - 1][column / 2],\n\t\t\t\t\tmatrix[row / 2][column / 2 - 1],\n\t\t\t\t\tmatrix[row / 2 - 1][column / 2 - 1]]\n \t\n \tif row % 2 and (not (column % 2)):\n\t\tpossible_center = {\n\t\t\t0: [row /2, column / 2],\n\t\t\t1: [row / 2, column / 2 - 1],\n\t\t}\n \n\t\tgrid_vals = [matrix[row /2][column / 2],\n\t\t\t\t\tmatrix[row / 2][column / 2 - 1]]\n\n\tif (not (row % 2)) and column % 2:\n\t\tpossible_center = {\n\t\t\t0: [row /2, column / 2],\n\t\t\t1: [row / 2 - 1, column / 2],\n\t\t}\n\t\tgrid_vals = [matrix[row /2][column / 2],\n\t\t\t\t\tmatrix[row / 2 - 1][column / 2]]\n \t\n\tif possible_center:\n\t\tcenter_help = grid_vals.index(max(grid_vals))\n\t\tcenter = possible_center[center_help]\n\teaten = matrix[center[0]][center[1]]\n\tmatrix[center[0]][center[1]] = 0\n\treturn [matrix, center, eaten]", "def _near_center(self, xy, frame, tol=0.75):\n cxy = self._get_frame_center(frame)\n d = calc_length(xy, cxy)\n tol *= self.pxpermm\n return d < tol", "def circle_center(top_aerofoil_points, bottom_aerofoil_points):\n q = np.array(top_aerofoil_points[0].coordinates) - np.array(top_aerofoil_points[1].coordinates)\n r = np.array(bottom_aerofoil_points[-1].coordinates) - np.array(bottom_aerofoil_points[-2].coordinates)\n c = np.cross(q, [0, 0, -1]) / np.linalg.norm(q)\n d = np.cross(r, [0, 0, 1]) / np.linalg.norm(r)\n radius = (q[1] - r[1]) / (d[1] - c[1])\n s = q + radius * c\n return Point(tuple(-s))", "def get_center_coordinates(game):\n \n return math.ceil(game.height / 2), math.ceil(game.width / 2)", "def get_arc_center(self):\n # First two anchors and handles\n a1, h1, h2, a2 = self.points[:4]\n # Tangent vectors\n t1 = h1 - a1\n t2 = h2 - a2\n # Normals\n n1 = rotate_vector(t1, TAU / 4)\n n2 = rotate_vector(t2, TAU / 4)\n try:\n return line_intersection(\n line1=(a1, a1 + n1),\n line2=(a2, a2 + n2),\n )\n except Exception:\n warnings.warn(\"Can't find Arc center, using ORIGIN instead\")\n return np.array(ORIGIN)", "def get_pos(self, frame):\n frame = self.perspective_shift(frame)\n \n puck_mask = self.color_mask(frame, self.color_green, thresh=15)\n striker_mask = self.color_mask(frame, self.color_orange, thresh=25, blur=5)\n \n puck_loc, _ = self.find_centroids(puck_mask)\n striker_locs, _ = self.find_centroids(striker_mask, 2)\n \n p_pos = self.abs_to_meter(puck_loc[0])\n # cases: (pos,pos), (pos,None), (None,None)\n if striker_locs[0] is not None:\n pos_1 = self.abs_to_meter(striker_locs[0])\n pos_2 = self.abs_to_meter(striker_locs[1])\n s1_pos = pos_1 if pos_1[1]<0 else pos_2\n s2_pos = pos_2 if pos_1[1]<0 else pos_1\n else:\n s1_pos, s2_pos = None, None \n \n return [p_pos, s1_pos, s2_pos]", "def _get_frame_center(self, src):\n w, h = get_size(src)\n x = w / 2\n y = h / 2\n\n return x, y", "def center(rect1, rect2, xoffset=0, yoffset=0):\n return (rect1.centerx - rect2.centerx + xoffset, 
rect1.centery - rect2.centery + yoffset)", "def find_centermost_cell(self, cells):\n \n closest_cell = None\n \n for current_cell in cells:\n current_dist = abs(current_cell.rect.centery - self.player.rect.centery)\n if closest_cell is None or current_dist < closest_dist:\n closest_cell = current_cell\n closest_dist = current_dist\n\n return closest_cell", "def center(self) -> Tuple[int, int]:\n center_x = int((self.x1 + self.x2) // 2)\n center_y = int((self.y1 + self.y2) // 2)\n return (center_x, center_y)", "def getpolycenter(poly):\n polylength = len(poly)\n\n return (\n round(sum(x for x, y in poly) / polylength, 2),\n round(sum(y for x, y in poly) / polylength, 2)\n )", "def get_pair_rects(contours):\n\n rect_pairs = []\n for index, cnt in enumerate(contours):\n # Rotated rect - ( center (x,y), (width, height), angle of rotation )\n rect = cv2.minAreaRect(cnt)\n center_x, center_y = rect[0]\n rect_angle = -round(rect[2], 2)\n\n if rect_angle > 45.0:\n # Iterate through all of the potential matches\n min_x_dist = min_rect = min_index = None\n for pot_index, pot_match in enumerate(contours):\n if np.array_equal(pot_match, cnt):\n continue\n\n match_rect = cv2.minAreaRect(pot_match)\n\n # Check if match is to the right of the contour\n if match_rect[0][0] > rect[0][0] and abs(\n match_rect[2] - rect_angle) > ANGLE_TOLERANCE_DEG:\n x_distance = match_rect[0][0] - rect[0][0]\n\n if min_x_dist is None or x_distance < min_x_dist:\n min_x_dist = x_distance\n min_rect = match_rect\n min_index = pot_index\n\n if min_rect is not None:\n rect_pairs.append((rect, min_rect))\n np.delete(contours, index)\n np.delete(contours, min_index)\n\n return rect_pairs", "def center(self):\n xc = (self.x.max() + self.x.min())/2.\n yc = (self.y.max() + self.y.min())/2.\n return (xc, yc)", "def get_center_scr(self):\r\n return self.rect.center", "def get_view_center(view_dict):\n bounds = view_dict['bounds']\n return (bounds[0][0] + bounds[1][0]) / 2, (bounds[0][1] + bounds[1][1]) / 2", "def get_center_point(bbox):\n x_middle = 42\n y_middle = 42\n\n # HINT: bbox.xmin, bbox,xmax, bbox.ymin, bbox.ymax\n return (x_middle, y_middle)", "def find_center(self) -> tuple:\r\n \r\n # Add up all the x values of pixels in the plant\r\n # Then divide by total pixels in the plant\r\n avg_x = sum([i[0] for i in self.cluster]) / len(self.cluster)\r\n\r\n # Add up all the y values of pixels in the plant\r\n # Then divide by total pixels in the plant\r\n avg_y = sum([i[1] for i in self.cluster]) / len(self.cluster)\r\n\r\n self.center = (int(round(avg_x)), int(round(avg_y)))\r\n \r\n # return the results in a tuple of integers\r\n return self.center", "def find_center(garden):\n\n # Find center: j is 'row', i is 'col'\n # Initialize j and i, rows and cols\n j, i = -1, -1\n num_rows = len(garden)\n num_cols = len(garden[0])\n\n # This section should be cleaned up before pushing\n if num_rows % 2 != 0:\n j = num_rows // 2\n else:\n j1, j2 = num_rows // 2, num_rows // 2 - 1\n\n if j != -1:\n if num_cols % 2 != 0:\n i = num_cols // 2\n else:\n # Find the most carrots near the center of the row j\n i = garden[j].index(max(garden[j][num_cols//2], garden[j][num_cols//2]-1))\n\n else:\n if num_cols % 2 != 0:\n i1 = garden[j1][num_cols//2]\n i2 = garden[j2][num_cols//2]\n else:\n i1 = max(garden[j1][num_cols//2], garden[j1][num_cols//2]-1)\n i2 = max(garden[j2][num_cols//2], garden[j2][num_cols//2]-1)\n\n ival = max(i1, i2)\n if ival == i1:\n j = j1\n else:\n j = j2\n\n i = garden[j].index(ival)\n\n return (j, i)", "def 
absolute_collide_topleft(self):\n x,y = self.position\n rect = self.collide_rect\n return (x-rect.width/2, y-rect.height)", "def get_pos(self) -> tuple:\n return self.rect.center", "def center_on_box(img, radius, min_ref, xmin, xmax, ymin, ymax, na_val=-9999):\n x, y = num.meshgrid(num.arange(-radius, radius), num.arange(-radius, radius))\n coords = [(i, j) for i, j in zip(x.flatten(), y.flatten()) if (i ** 2 + j ** 2) ** 0.5 <= radius]\n fit = [num.mean(img[(xmin + i):(xmax + i), (ymin + j):(ymax + j)]) for i, j in coords]\n if num.nanmin(fit) <= min_ref:\n return num.array(coords[num.nanargmin(fit)])\n else:\n return num.array([na_val, na_val])" ]
[ "0.67331475", "0.64515555", "0.6001059", "0.59175", "0.5892248", "0.58833814", "0.585424", "0.5841984", "0.5752221", "0.5712794", "0.56921977", "0.56689316", "0.5661196", "0.56526184", "0.56173843", "0.5592641", "0.5551825", "0.5525141", "0.54953676", "0.5477719", "0.5455593", "0.54447275", "0.54325014", "0.54300034", "0.54215807", "0.5416267", "0.5410803", "0.5376754", "0.53738576", "0.5371567" ]
0.8538978
0
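closest_center above relies on two helpers that the row does not show: the global FRAME_CENTER and a distance() function. The definitions below are a plausible reading consistent with the other rows, not the original source; the 640x480 frame size is an assumption.

from math import sqrt

# Assumed frame geometry; the dataset row only references FRAME_CENTER.
FRAME_WIDTH, FRAME_HEIGHT = 640, 480
FRAME_CENTER = (FRAME_WIDTH / 2 - 0.5, FRAME_HEIGHT / 2 - 0.5)

def distance(p1, p2):
    """Euclidean distance between two (x, y) points."""
    return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)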
Find the angle between cX and the center of the frame.
def horizontal_angle(cX): return atan(((FRAME_CENTER[0] + .5) - cX) / FOCAL_LENGTH)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _angle(self, a, b, c):\n divid = (a ** 2 + b ** 2 - c ** 2)\n divis = (2 * a * b)\n if (divis) > 0:\n result = float(divid) / divis\n if result <= 1.0 and result >= -1.0:\n return acos(result)\n return 0\n else:\n return 0", "def angle(self):\n return arccos(dot((self.a - self.o) / self.r, (self.b - self.o) / self.r))", "def getAngle(self):\n x, y = self.components\n return math.atan2(y, x)", "def get_x_y_from_center(center, angle):\n print \"center\", center\n size_of_img = (640, 480)\n alpha_x = angle + (center[1] - 0.5 * size_of_img[1]) * camera_y_angle / size_of_img[1] \n alpha_y = (center[0] - 0.5 * size_of_img[0]) * camera_x_angle / size_of_img[0] \n print \"angle y :\", alpha_y\n delta_x = height / math.tan(math.radians(alpha_x))\n d = math.sqrt(delta_x ** 2 + height ** 2)\n delta_y = d * math.sin(math.radians(alpha_y))\n return round(delta_x), round(delta_y)", "def angle(self) -> float:\n ...", "def angle(self) -> int:", "def get_angle(a, b, c):\n\n ba = a - b\n cb = c - b\n\n ba_mod = mod(ba)\n cb_mod = mod(cb)\n val = dot(ba, cb) / (ba_mod * cb_mod)\n # better fix?\n if val > 1:\n val = 1\n elif val < -1:\n val = -1\n\n return np.arccos(val)", "def angle(self):\n return 0", "def calculate_yaw(pixel_x, center_x) -> float:\n yaw = math.degrees(math.atan((pixel_x - center_x) / H_FOCAL_LENGTH))\n return yaw", "def calculate_angle(centre, prev_centre):\n o = centre[1] - prev_centre[1]\n a = centre[0] - prev_centre[0]\n return round(math.degrees(math.atan2(o, a)))", "def getAngle(self):\n return self.articulateEncoder.getDistance()+self.angleOffset", "def acos (cls, x) :\n return Angle_R (math.acos (x))", "def compute_angle(self, a, b, c):\n\n ba = a - b\n bc = c - b\n\n cosine_angle = np.dot(ba, bc) / \\\n (np.linalg.norm(ba) * np.linalg.norm(bc))\n\n # because of precision issues, sometimes cosine_angle is something linke -1.000000001\n # we make sure we only pass the correct arguments to np.arccos()\n if cosine_angle > 1:\n cosine_angle = 1\n elif cosine_angle < -1:\n cosine_angle = -1\n\n angle = np.arccos(cosine_angle)\n\n return np.degrees(angle)", "def angle(self):\n return atan2(self.v.y, self.v.x)", "def angle(self):\n return angle(self.force, self.forceXYZ, self.excited_axis,\n self.distance, self.distanceXYZ)", "def angle_from_point( x, img_width=640, fov_angle=44 ):\r\n return( -( ( img_width / 2 ) - x ) * fov_angle )", "def atan(self, x):\n return self.arctan(x)", "def angle(z):", "def calculate_attitude_angle(self):\n return np.arctan(np.pi * (1 - self.eccentricity_ratio ** 2) / (4 * self.eccentricity_ratio))", "def angle(self):\n v = self.p1 - self.p0\n return atan2(v.y, v.x)", "def atan (cls, x) :\n return Angle_R (math.atan (x))", "def centralAngle(self):\n global central_angle\n central_angle = always_redraw(\n lambda : Angle(radius_horiz, radius_ang, radius=0.25, stroke_color=YELLOW)\n )\n\n global central_angle_label\n central_angle_label = always_redraw(\n lambda : MathTex(\"x\", stroke_color=GREEN).scale(0.75).move_to(\n LEFT*5+UP*(0.3*self.x_max*np.sin(0.5*theta.get_value()*DEGREES))+RIGHT*(0.3*self.x_max*np.cos(0.5*theta.get_value()*DEGREES)))\n )\n\n self.play(Write(central_angle), Write(central_angle_label))", "def deltaAngle(x, y):\n return math.atan2(math.sin(x-y), math.cos(x-y))", "def angle(self):\n return self._angle", "def angle(self):\n return self._angle", "def angle(self):\n return self._angle", "def mean_sweep_angle(self) -> float:\n root_quarter_chord = self.xsecs[0].quarter_chord()\n tip_quarter_chord = self.xsecs[-1].quarter_chord()\n\n vec 
= tip_quarter_chord - root_quarter_chord\n vec_norm = vec / np.linalg.norm(vec)\n\n sin_sweep = vec_norm[0] # from dot product with x_hat\n\n sweep_deg = np.arcsind(sin_sweep)\n\n return sweep_deg", "def angle(self) -> float:\n return self._angle", "def angle(self) -> float:\n return self._angle", "def xyangle(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n dx = self.x-xc\n dy = self.y-yc\n self.angle = arctan2(dx,dy) # in radians\n self.sin = sin(self.angle)\n self.cos = cos(self.angle)" ]
[ "0.67299557", "0.66971684", "0.6631527", "0.65955645", "0.65902543", "0.65793175", "0.6507206", "0.65059215", "0.64560986", "0.64042515", "0.63694596", "0.63620293", "0.63507414", "0.63388413", "0.63061696", "0.6257793", "0.6250895", "0.6244727", "0.62382525", "0.6227177", "0.6225504", "0.6222526", "0.6213939", "0.6208941", "0.6208941", "0.6208941", "0.6183147", "0.6181958", "0.6181958", "0.6180313" ]
0.80907357
0
Find the midpoint between point1 and point2.
def midpoint(point1, point2): x, y = (int((point1[0] + point2[0]) / 2), int((point1[1] + point2[1]) / 2)) return (x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mid_point(a: Point, b: Point) -> Point:\n return Point((a.x + b.x) / 2, (a.y + b.y) / 2)", "def midpoint(p1, p2):\n return np.array([(p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2, (p1[2] + p2[2]) / 2])", "def midpoint(ptA, ptB):\n return( (ptA[0] + ptB[0]) * 0.5, (ptA[1]+ ptB[1]) * 0.5 )", "def midpoint(a, b):\n return ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)", "def _mid(pt1, pt2):\n (x0, y0), (x1, y1) = pt1, pt2\n return 0.5 * (x0 + x1), 0.5 * (y0 + y1)", "def midpoint(self, other: PointType = None) -> PointType:\n return (self + (other or Point())) / 2", "def mid(p1, p2):\n return (p1[0]+p2[0])/2, (p1[1]+p2[1])/2, (p1[2]+p2[2])/2", "def test_midpoint(self):\n p1 = Point(0, 0)\n p2 = Point(10, 10)\n midpoint = p1.midpoint(p2)\n self.assertAlmostEqual(midpoint.lat, 5)\n self.assertAlmostEqual(midpoint.lon, 5)", "def midpoint(a, b):\n mp = [(a.x + b.x) / 2, (a.y + b.y) / 2]\n return Vector(*mp)", "def midpoint_euclidean(self, x1, y1, x2, y2):\n dist_x = abs(x1 - x2) / 2.\n dist_y = abs(y1 - y2) / 2.\n res_x = x1 - dist_x if x1 > x2 else x2 - dist_x\n res_y = y1 - dist_y if y1 > y2 else y2 - dist_y\n return res_x, res_y", "def mid(p1, p2):\n\treturn [(p1[0]+p2[0])/2., (p1[1]+p2[1])/2.]", "def _middle_point(p1, p2):\n x = int((p1.x + p2.x) / 2)\n y = int((p1.y + p2.y) / 2)\n return (x, y)", "def find_midpoint(start, end):\n mid = (start + end) / 2\n return int(mid)", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def midpoint(self) -> Tuple[int, int]:\n pass", "def getMidPoint(self):\n return p.Point((self.start.normalVector + self.end.normalVector)/2.0)", "def midpoint(self) -> Tuple[int, int]:\n minx, miny, maxx, maxy = self.substrates.bounds\n return ((minx + maxx) // 2, (miny + maxy) // 2)", "def midpoint(f, x0, h):\n return 2.0*h*f(x0+h);", "def mid_point(self, vector):\n return self.eval_2pts(vector, 0.5)", "def midcoords(p, c1, c2):\n return make_coords(pos=midpoint(p, c1.worldpos(), c2.worldpos()),\n rot=midrot(p, c1.worldrot(), c2.worldrot()))", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y", "def midpt_formula(loc1, loc2):\n xm = (loc1[0] + loc2[0])/2.0\n ym = (loc1[1] + loc2[1])/2.0\n return [xm, ym]", "def midpoint(self,i,f):\n\n summation = self.points[f, :] + self.points[i, :]\n midploint = summation/2\n x_mid = midploint[0]\n y_mid = midploint[1]\n\n return x_mid,y_mid", "def get_midpoint(half_distance: int, steps: [(int, str, str)]) -> (str, str):\r\n cur_distance = 0\r\n cur_step = 0\r\n while(cur_distance + steps[cur_step][0] <= half_distance):\r\n cur_distance += steps[cur_step][0]\r\n cur_step += 1\r\n\r\n left_over_distance = half_distance - cur_distance\r\n\r\n # lat, lng\r\n startpos = (steps[cur_step][1]['lat'],steps[cur_step][1]['lng'])\r\n endpos = (steps[cur_step][2]['lat'],steps[cur_step][2]['lng'])\r\n lat_length = endpos[0] - startpos[0]\r\n lng_length = endpos[1] - startpos[1]\r\n\r\n\r\n # get angle in radians\r\n angle = math.atan(lng_length/lat_length)\r\n\r\n if (endpos[0] > startpos[0] and endpos[1] > startpos[1]) or \\\r\n (endpos[0] < startpos[0] and endpos[1] > startpos[1]):\r\n return 
(startpos[0]+left_over_distance*math.cos(angle)/111000, startpos[1]+left_over_distance*math.sin(angle)/111000)\r\n return (startpos[0]-left_over_distance*math.cos(angle)/111000, startpos[1]-left_over_distance*math.sin(angle)/111000)", "def midpoint(bbox):\n return (0.5*(bbox[0][0] + bbox[1][0]), 0.5*(bbox[0][1] + bbox[1][1]))", "def calculate_slope_between_two_points(point_a: Dict[str,float], point_b: Dict[str, float]) -> float: # _5 [✅] \n if len(point_a) == len(point_b) == 0: raise ValueError\n if set(point_a).symmetric_difference(set(point_b)) == set():\n return float('inf') if int(point_b['x'] - point_a['x']) == 0 else int((int(point_b['y'] - point_a['y']) / int(point_b['x'] - point_a['x'])))\n elif set(point_a).symmetric_difference(set(point_b)) != set(): raise ValueError\n elif point_a['x'] == point_b['x'] and point_b['y'] == point_a['y']: return float('inf')", "def measure_between_two_points(self, point_a, point_b):\n # cHaversine expects points to be given as (latitude, longitude) pairs.\n # TODO: Determine if this check for non-null values is necessary.\n if point_a and point_b:\n return haversine(tuple(point_a), tuple(point_b))", "def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))", "def angle_midpoint(ang1,ang2,units):\n return ang1 + angle_difference(ang1,ang2,units)/2." ]
[ "0.86495996", "0.84351975", "0.8341314", "0.83263826", "0.81008357", "0.79440206", "0.7909224", "0.76864284", "0.75956684", "0.756792", "0.7434356", "0.7314022", "0.71300644", "0.71001923", "0.70689064", "0.6998397", "0.6986347", "0.6792534", "0.67725915", "0.67189986", "0.65608615", "0.6542041", "0.6540149", "0.6482157", "0.6477102", "0.64605445", "0.6435201", "0.63946205", "0.63469124", "0.63028914" ]
0.8921536
0
Find the tape using the VideoCapture object in this script and display it. Fetches a frame from the CAP object in this script and finds the horizontal angle between the center of the frame and the tape pair closest to the center of the frame. It prints the horizontal angle and draws various information on the frame before displaying it in a window.
def find_tape(): _, frame = CAP.read() hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) mask = cv2.inRange(hsv, color_lower, color_upper) _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Find all valid pair rects, and return if none found pair_rects = get_pair_rects(contours) if len(pair_rects) == 0: return # If found, continue on and post results center = closest_center(pair_rects) to_send = '{}:{}\n'.format( round(time.time(), 3), round(degrees(horizontal_angle(center[0])), 3)) print(to_send) s.send(bytearray(to_send, 'utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record_project():\n # open the video and save the frame and return the fW,fH and the frame\n frame = video_handle_for_demo()\n\n # detect the blue square and resize the frame\n image = detect_and_rotate(frame)\n if image is None:\n return [-100, -100, 0]\n\n fW, fH, _ = image.shape\n\n # detect both yellow and green square for further angle and center computation\n x2g, y2g, xfg, yfg, frameg = frame_analysis_green(fW, fH, image)\n\n x2y, y2y, xfy, yfy, framey = frame_analysis_yellow(fW, fH, image)\n\n # Correct the coordinate to have them in the true axis\n # x2_ are the coordinate in grid referential\n # xf_ are the coordinate in pixel of the resized image referential\n\n x2y = xfy\n x2g = xfg\n y2g = yfg\n y2y = yfy\n\n # Compute the thymio center in grid's coordinate\n xc = (x2g + x2y) / 2\n yc = (y2g + y2y) / 2\n print(\"Picture of thymio with computed center represented by a green dot\")\n cv2.circle(image, (round(xc), round(yc)), 4, (255, 255, 0), -1)\n plt.figure()\n plt.imshow(image[:, :, ::-1])\n plt.show()\n\n ratio = (gw / fH, gh / fW)\n\n xfg_temp = fW - (fH - yfg)\n yfg = xfg\n xfg = xfg_temp\n\n xfy_temp = fW - (fH - yfy)\n yfy = xfy\n xfy = xfy_temp\n\n # Compute the angle thymio has\n angle = give_thymio_angle(image, xfy, yfy, xfg, yfg)\n\n x2g = x2g * ratio[0]\n x2y = x2y * ratio[0]\n y2g = y2g * ratio[1]\n y2y = y2y * ratio[1]\n\n # compute the center of the thymio & gives thymio angle\n xc = (x2g + x2y) / 2\n yc = (y2g + y2y) / 2\n\n # plot the image with the drawings and print the X,Y coordinate and the angle\n xc = xc - 2.5\n yc = yc - 2.5\n yc = 72.5 - yc\n\n return [xc, yc, angle], image", "def main():\n \n #\n # Initialization\n #\n ref_time = time.time()\n output_string = '' \n cv2.namedWindow('frame', cv2.WINDOW_GUI_NORMAL+cv2.WINDOW_AUTOSIZE)\n \n #\n # Open the capture device and print some\n # useful properties\n #\n cap = cv2.VideoCapture(0)\n if cap.isOpened():\n #cap.set(cv.CV_CAP_PROP_FRAME_WIDTH, 320)\n #cap.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 240)\n \n frameWidth = cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)\n frameHeight = cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)\n \n print 'frame: width {}, height {}'.format(frameWidth, frameHeight)\n\n #\n # Parameters for Lucas-Kanade optical flow\n #\n lk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n #\n # Predefine points to track\n #\n track_points = np.array([[[220.0, 120.0]],\n [[220.0, 200.0]],\n [[220.0, 280.0]],\n [[220.0, 360.0]],\n [[420.0, 120.0]],\n [[420.0, 200.0]],\n [[420.0, 280.0]],\n [[420.0, 360.0]]], 'float32')\n \n #\n # Take first frame and find corners in it\n #\n cap_ok, frame = cap.read()\n if not cap_ok:\n sys.exit()\n\n prev_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n print 'rel_time,p0dx,p0dy,p1dx,p1dy,p2dx,p2dy,p3dx,p3dy,p4dx,p4dy,p5dx,p5dy,p6dx,p6dy,p7dx,p7dy'\n\n while(True):\n\n cap_ok, frame = cap.read()\n if not cap_ok:\n break\n \n curr_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n #\n # Calculate optical flow\n #\n next_points, st, err = cv2.calcOpticalFlowPyrLK(prev_frame, curr_frame, track_points, None, **lk_params)\n\n #\n # Iterate through points and display on video frame\n # as well as output a CSV formated value list\n #\n for point_index in range(0, track_points.shape[0]):\n \n #\n # Display results on video frame\n #\n track_point = np.int0(track_points[point_index])\n x0,y0 = track_point.ravel()\n cv2.circle(frame, (x0,y0), 5, (0,255,0), -1)\n\n next_point = 
np.int0(next_points[point_index])\n x1,y1 = next_point.ravel()\n cv2.circle(frame, (x1,y1), 5, (0,0,255), -1)\n\n #\n # Build CSV string\n #\n output_string += ',{:.2f},{:.2f}'.format(x0-x1, y0-y1)\n \n #\n # Print out some data in a CSV format for graphing\n #\n now = time.time() - ref_time \n print '{:.2f}{}'.format(now, output_string)\n output_string = ''\n\n #\n # Display result and check for escape key\n #\n cv2.imshow('frame',frame)\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n #\n # Now update the previous frame and previous points\n #\n prev_frame = curr_frame.copy()\n\n cv2.destroyAllWindows()\n cap.release()", "def display_video_stream(self):\n _, frame = self.capture.read()\n\n if frame is None:\n return\n\n # hand rectangle dimensions\n x0 = self.video_size.height() - int(self.handrect_y + self.handrect_height / 2.)\n x1 = self.video_size.height() - int(self.handrect_y - self.handrect_height / 2.)\n y0 = int(self.handrect_x - self.handrect_width / 2.)\n y1 = int(self.handrect_x + self.handrect_width / 2.)\n cv2_p0 = (y0, x0)\n cv2_p1 = (y1, x1)\n self.video_size.width()\n\n frame, count_defects = find_gesture(frame, hand_p0=cv2_p1, hand_p1=cv2_p0,\n invert=self.checkBox_invert.isChecked())\n\n if count_defects == Gestures.ONE.value:\n cv2.putText(frame, \"closed fist\", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)\n elif count_defects == 2:\n cv2.putText(frame, \"Two fingers\", (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 2)\n\n elif count_defects == 3:\n cv2.putText(frame, \"three fingers\", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)\n elif count_defects == 4:\n cv2.putText(frame, \"four fingers\", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)\n else:\n cv2.putText(frame, \"five fingers\", (50, 50), \\\n cv2.FONT_HERSHEY_SIMPLEX, 2, 2)\n\n if count_defects != self.current_gesture or count_defects == Gestures.ONE.value:\n # update progress time\n self.gesture_time = time()\n self.progressBar.setValue(0)\n else:\n self.current_gesture = count_defects\n\n dt = time() - self.gesture_time\n\n if dt < self.maxdt:\n # update progress bar\n self.progressBar.setValue(int(100*dt/self.maxdt))\n else:\n # trigger action\n cbox = self.comboboxes[max(min(4, count_defects-2), 0)]\n func_idx = cbox.currentIndex()\n callfunc = cbox.itemData(func_idx)\n callfunc()\n self.labelStatus.setText(\"{}, {}\".format(count_defects, cbox.itemText(func_idx)))\n\n # reset progress time\n self.gesture_time = time()\n\n self.current_gesture = count_defects\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = QImage(frame, frame.shape[1], frame.shape[0],\n frame.strides[0], QImage.Format_RGB888)\n self.image_label.setPixmap(QPixmap.fromImage(image))", "def camera_scan(h, cam, direction):\n if direction == \"right\":\n offset = -1.0\n elif direction == \"left\":\n offset = 1.0\n else:\n offset = 0.0\n\n\n cam.set_pan_tilt(offset * 1.3, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3)\n time.sleep(4)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)\n\n\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3 + 0.4)\n time.sleep(2)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3 + 0.4, 0.0)\n time.sleep(1)\n 
h.rotate(\"head_tilt_joint\", 0.0)\n \n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.5)\n time.sleep(1)\n h.rotate(\"head_pan_joint\", offset * 1.3 - 0.4)\n time.sleep(2)\n h.rotate(\"head_tilt_joint\", 0.5)\n time.sleep(2)\n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.3)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.3)\n cam.set_pan_tilt(offset * 1.3 - 0.4, 0.0)\n time.sleep(1)\n h.rotate(\"head_tilt_joint\", 0.0)", "def showAstrometry(exposure, wcs, allMatches, useMatches, frame=0, title=None, pause=False):\n import lsst.afw.display.ds9 as ds9\n ds9.mtv(exposure, frame=frame, title=title)\n\n useIndices = set(m.second.getId() for m in useMatches)\n\n radii = []\n with ds9.Buffering():\n for i, m in enumerate(allMatches):\n x, y = m.second.getX(), m.second.getY()\n pix = wcs.skyToPixel(m.first.getCoord())\n\n isUsed = m.second.getId() in useIndices\n if isUsed:\n radii.append(numpy.hypot(pix[0] - x, pix[1] - y))\n\n color = ds9.YELLOW if isUsed else ds9.RED\n\n ds9.dot(\"+\", x, y, size=10, frame=frame, ctype=color)\n ds9.dot(\"x\", pix[0], pix[1], size=10, frame=frame, ctype=color)\n\n radii = numpy.array(radii)\n print \"<dr> = %.4g +- %.4g pixels [%d/%d matches]\" % (radii.mean(), radii.std(),\n len(useMatches), len(allMatches))\n\n if pause:\n import sys\n while True:\n try:\n reply = raw_input(\"Debugging? [p]db [q]uit; any other key to continue... \").strip()\n except EOFError:\n reply = \"\"\n\n reply = reply.split()\n if len(reply) > 1:\n reply, _ = reply[0], reply[1:]\n if reply == \"p\":\n import pdb;pdb.set_trace()\n elif reply == \"q\":\n sys.exit(1)\n else:\n break", "def draw_target_info(img, rv, tv):\n yaw, pitch, roll = [math.degrees(angle) for angle in get_target_angles(rv)]\n cv2.putText(img, f\"Yaw: {yaw:.2f}, Pitch: {pitch:.2f}, Roll: {roll:.2f}\",\n (0, img.shape[0] - 40), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))\n cv2.putText(img, f\"Translation: ({tv[0][0]:.2f}, {tv[1][0]:.2f}, {tv[2][0]:.2f})\",\n (0, img.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255))\n\n # Re-project to visualize\n l = 20\n points, jacobian = cv2.projectPoints(np.array([\n [0, 0, 0], # Center\n [l, 0, 0], # X axis\n [0, l, 0], # Y axis\n [0, 0, l], # Z axis\n ], dtype=\"double\"), rv, tv, CAMERA_MATRIX, CAMERA_DIST_COEFFS)\n points = [(int(point[0][0]), int(point[0][1])) for point in points]\n cv2.arrowedLine(img, points[0], points[1], (0, 0, 255))\n cv2.arrowedLine(img, points[0], points[2], (255, 0, 0))\n cv2.arrowedLine(img, points[0], points[3], (0, 255, 0))", "def lookThruAndFrame(obj):\n cmds.lookThru(obj)\n # Position the active camera to view the active objects\n pm.viewFit()\n\n # Position cameraShape-1 to view all objects\n pm.viewFit(obj, all=True)\n\n # Fill 50 percent of the active view with active objects\n pm.viewFit(f=0.5)\n pm.viewFit(all=True)", "def plot_trace_on_frame(frame, dlc_df, cam):\r\n # Define colors\r\n colors = {'tail_start': '#636EFA',\r\n 'nose_tip': '#636EFA',\r\n 'paw_l': '#EF553B',\r\n 'paw_r': '#00CC96',\r\n 'pupil_bottom_r': '#AB63FA',\r\n 'pupil_left_r': '#FFA15A',\r\n 'pupil_right_r': '#19D3F3',\r\n 'pupil_top_r': '#FF6692',\r\n 'tongue_end_l': '#B6E880',\r\n 'tongue_end_r': '#FF97FF'}\r\n # Threshold the dlc traces\r\n dlc_df = likelihood_threshold(dlc_df)\r\n # Features without tube\r\n features = np.unique(['_'.join(x.split('_')[:-1]) for x in dlc_df.keys() if 'tube' not in x])\r\n # Normalize the number of points across cameras\r\n dlc_df_norm = pd.DataFrame()\r\n for feat in features:\r\n dlc_df_norm[f'{feat}_x'] = 
dlc_df[f'{feat}_x'][0::int(SAMPLING[cam] / 10)]\r\n dlc_df_norm[f'{feat}_y'] = dlc_df[f'{feat}_y'][0::int(SAMPLING[cam] / 10)]\r\n # Scatter\r\n plt.scatter(dlc_df_norm[f'{feat}_x'], dlc_df_norm[f'{feat}_y'], alpha=0.05, s=2, label=feat, c=colors[feat])\r\n\r\n plt.axis('off')\r\n plt.imshow(frame, cmap='gray')\r\n plt.tight_layout()\r\n\r\n ax = plt.gca()\r\n if cam == 'body':\r\n plt.title(f'{cam.capitalize()} camera')\r\n return ax\r\n # For left and right cam plot whisker pad rectangle\r\n # heuristic: square with side length half the distance between nose and pupil and anchored on midpoint\r\n p_nose = np.array(dlc_df[['nose_tip_x', 'nose_tip_y']].mean())\r\n p_pupil = np.array(dlc_df[['pupil_top_r_x', 'pupil_top_r_y']].mean())\r\n p_anchor = np.mean([p_nose, p_pupil], axis=0)\r\n dist = np.linalg.norm(p_nose - p_pupil)\r\n rect = matplotlib.patches.Rectangle((int(p_anchor[0] - dist / 4), int(p_anchor[1])), int(dist / 2), int(dist / 3),\r\n linewidth=1, edgecolor='lime', facecolor='none')\r\n ax.add_patch(rect)\r\n # Plot eye region zoom\r\n inset_anchor = 0 if cam == 'right' else 0.5\r\n ax_ins = ax.inset_axes([inset_anchor, -0.5, 0.5, 0.5])\r\n ax_ins.imshow(frame, cmap='gray', origin=\"lower\")\r\n for feat in features:\r\n ax_ins.scatter(dlc_df_norm[f'{feat}_x'], dlc_df_norm[f'{feat}_y'], alpha=1, s=0.001, label=feat, c=colors[feat])\r\n ax_ins.set_xlim(int(p_pupil[0] - 33 * RESOLUTION[cam] / 2), int(p_pupil[0] + 33 * RESOLUTION[cam] / 2))\r\n ax_ins.set_ylim(int(p_pupil[1] + 38 * RESOLUTION[cam] / 2), int(p_pupil[1] - 28 * RESOLUTION[cam] / 2))\r\n ax_ins.axis('off')\r\n # Plot tongue region zoom\r\n p1 = np.array(dlc_df[['tube_top_x', 'tube_top_y']].mean())\r\n p2 = np.array(dlc_df[['tube_bottom_x', 'tube_bottom_y']].mean())\r\n p_tongue = np.nanmean([p1, p2], axis=0)\r\n inset_anchor = 0 if cam == 'left' else 0.5\r\n ax_ins = ax.inset_axes([inset_anchor, -0.5, 0.5, 0.5])\r\n ax_ins.imshow(frame, cmap='gray', origin=\"upper\")\r\n for feat in features:\r\n ax_ins.scatter(dlc_df_norm[f'{feat}_x'], dlc_df_norm[f'{feat}_y'], alpha=1, s=0.001, label=feat, c=colors[feat])\r\n ax_ins.set_xlim(int(p_tongue[0] - 60 * RESOLUTION[cam] / 2), int(p_tongue[0] + 100 * RESOLUTION[cam] / 2))\r\n ax_ins.set_ylim(int(p_tongue[1] + 60 * RESOLUTION[cam] / 2), int(p_tongue[1] - 100 * RESOLUTION[cam] / 2))\r\n ax_ins.axis('off')\r\n\r\n plt.title(f'{cam.capitalize()} camera')\r\n return ax", "def detect_and_display(self, frame): \n return self.__display_faces(\n self.__detect_faces(frame),\n frame\n )", "def analyse_swing(frame, predictions, reference, metadata):\n keypoints = get_keypoints(predictions, metadata)\n analysis_dict = calculate_analysis_dict(keypoints)\n vis_frame = draw_keypoints(frame, keypoints, analysis_dict, reference)\n vis_frame = output_angles(vis_frame, analysis_dict, reference)\n return vis_frame", "def write_hud(self, frame):\n\n class HUD:\n def __init__(self, def_color=(255, 170, 0)):\n self.def_color = def_color\n self.infos = []\n def add(self, info, color=None):\n if color is None: color = self.def_color\n self.infos.append((info, color))\n def draw(self, frame):\n i=0\n for (info, color) in self.infos:\n cv2.putText(frame, info, (0, 30 + (i * 30)),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.0, color, 2) #lineType=30)\n i+=1\n \n\n hud = HUD()\n tello_color = (0,255,0)\n if self.debug: hud.add(datetime.datetime.now().strftime('%H:%M:%S'))\n hud.add(f\"FPS {self.fps.get():.2f}\")\n if self.debug: hud.add(f\"VR {self.video_encoder_rate}\")\n\n hud.add(f\"BAT {self.battery}\")\n if 
self.is_flying:\n hud.add(\"FLYING\", (0,255,0))\n else:\n hud.add(\"NOT FLYING\", (0,0,255))\n hud.add(f\"TRACKING {'ON' if self.tracking else 'OFF'}\", (0,255,0) if self.tracking else (0,0,255) )\n hud.add(f\"EXPO {self.exposure}\")\n #hud.add(f\"ALT {self.ref_pos_x}\")\n \n\n if self.hand_ctrl:\n hud.add(f\"HAND Ctrl {self.ref_pos_x}\",(0,0,255))\n hud.add(f\"HEAD_HAND_DIST {self.head_hand_x_ref - self.head_hand_x_dist}\")\n if self.axis_speed['yaw'] > 0:\n hud.add(f\"CW {self.axis_speed['yaw']}\", tello_color)\n elif self.axis_speed['yaw'] < 0:\n hud.add(f\"CCW {-self.axis_speed['yaw']}\", tello_color)\n else:\n hud.add(f\"CW 0\")\n if self.axis_speed['roll'] > 0:\n hud.add(f\"RIGHT {self.axis_speed['roll']}\", tello_color)\n elif self.axis_speed['roll'] < 0:\n hud.add(f\"LEFT {-self.axis_speed['roll']}\", tello_color)\n else:\n hud.add(f\"RIGHT 0\")\n if self.axis_speed['pitch'] > 0:\n hud.add(f\"FORWARD {self.axis_speed['pitch']}\", tello_color)\n elif self.axis_speed['pitch'] < 0:\n hud.add(f\"BACKWARD {-self.axis_speed['pitch']}\", tello_color)\n else:\n hud.add(f\"FORWARD 0\")\n if self.axis_speed['throttle'] > 0:\n hud.add(f\"UP {self.axis_speed['throttle']}\", tello_color)\n elif self.axis_speed['throttle'] < 0:\n hud.add(f\"DOWN {-self.axis_speed['throttle']}\",tello_color)\n else:\n hud.add(f\"UP 0\")\n if self.keep_distance: \n hud.add(f\"Target distance: {self.keep_distance} - curr: {self.target_height}\", (0,255,0))\n #if self.target_height: self.graph_distance.new_iter([self.target_height])\n if self.timestamp_take_picture: hud.add(\"Taking a picture\",tello_color)\n if self.palm_landing:\n hud.add(\"Palm landing...\", tello_color)\n if self.palm_landing_approach:\n hud.add(\"In approach for palm landing...\", tello_color)\n if self.tracking and not self.body_in_prev_frame and time.time() - self.timestamp_no_body > 0.5:\n hud.add(\"Searching...\", tello_color)\n if self.manual_control:\n hud.add(\"Manual Control...\", tello_color)\n if self.throw_ongoing:\n hud.add(\"Throw ongoing...\", tello_color)\n if self.scheduled_takeoff:\n seconds_left = int(self.scheduled_takeoff - time.time())\n hud.add(f\"Takeoff in {seconds_left}s\")\n\n hud.draw(frame)\n \n return frame", "def get_frame(cap):\n\n #camera matrix for camera calibration\n mtx = np.array(np.mat(\"588.4525598886621, 0, 301.8008794717551; 0, 588.9763096391521, 242.617026416902; 0, 0, 1\"))\n\n #distrotion coefficients for camera calibration\n dist = np.array(np.mat(\"-0.4351555722591889, 0.2082765081608728, -0.006072767012672472, 0.008139871640987759, 0\"))\n\n #get image frame from the camera\n ret, frame = cap.read()\n\n return frame\n\n h, w = frame.shape[:2]\n\n #get the new optimal camera matrix and the roi which can be used to crop the result\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h))\n\n #get the undistroted image\n dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)\n\n x,y,w,h = roi\n\n #get the cropped image\n dst = dst[y:y+h, x:x+w]\n h, w = dst.shape[:2]\n\n #furthur crop the image to reduce the size of arena\n dst = dst[int(h/7):int(h*6/7), int(w/7):int(w*6/7)]\n\n #resize the arena to ARENA_SIZE\n dst = cv2.resize(dst, ARENA_SIZE, interpolation= cv2.INTER_CUBIC)\n\n return dst", "def play(self, frame):\n cv2.imshow(\"view\", frame)\n return 0", "def make_frame(t):\r\n mlab.view(360*t/duration, 90) # camera angle\r\n return mlab.screenshot(antialiased=True) # return a RGB image\r", "def digi_rotate(t0, t1, rpm, path):\n # film given\n vid = cv2.VideoCapture(path)\n\n 
# collecting frame values from film\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n dim = (width, height)\n\n # find the starting frame position\n start = int(fps * t0)\n\n # if default, variable becomes the entire length of the film\n # else it becomes the length the user desires\n if t1 == 0:\n # error gets thrown if you go the complete end, so I stop 5\n # frames before\n frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT)) - 5\n else:\n frames = int(fps * (t1 - t0))\n\n # remove extension of film\n path = path[:-4]\n # create new file name\n output = path + '-rot.mp4'\n # codecc and new film to be outputted\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n video_writer = cv2.VideoWriter(output, fourcc, fps, dim)\n\n # poly are used to blackout area outside of selection\n # center is used as the axis of rotation\n poly1, poly2, center = interact.selection_window(vid, dim, start)\n\n # each rotation step\n # the negative is for the right hand rule\n # -1 * (360 deg / 1 rot) * (1 min / 60 sec) * rpm * fps\n dtheta = -1 * 6 * rpm / fps\n\n # performing rotation\n for i in range(frames):\n # collect current frame and resize\n ret, frame = vid.read()\n frame = cv2.resize(frame, dim, interpolation=cv2.INTER_CUBIC)\n\n # blackout region outside\n cv2.fillPoly(frame, np.array([poly1, poly2]), 0)\n cv2.circle(frame, center, 4, (255,0,0), -1)\n\n # rotate frame\n M = cv2.getRotationMatrix2D(center, i*dtheta, 1.0)\n frame = cv2.warpAffine(frame, M, dim)\n\n # re-center and re-size frame\n frame = interact.center_frame(frame, center[0], center[1], dim)\n centered = cv2.resize(frame, dim, interpolation=cv2.INTER_CUBIC)\n # write new frame to output\n video_writer.write(centered)\n\n # save output\n video_writer.release()", "def frame_analysis_green(fW, fH, frame):\n # Compute the ratio to go from pixel coordinate to grid coordinate\n cam_grid_ratio = (gw / fW, gh / fH)\n\n # Compute the green mask needed to find thymio\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, low_green, up_green)\n\n # Find to contours of the square in order to compute the center of it\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n areas = [cv2.contourArea(c) for c in contours]\n\n # If we don't find the square, return impossible value for the followin code\n # to know no measurement were possible\n if len(areas) < 1:\n\n # Display the resulting frame\n x2, y2 = (-1, -1)\n xf, yf = (-1, -1)\n\n else:\n\n # Find the largest moving object in the image\n max_index = np.argmax(areas)\n cnt = contours[max_index]\n x, y, w, h = cv2.boundingRect(cnt)\n\n # Find the center of the green square\n xf = x + int(w / 2)\n yf = y + int(h / 2)\n\n # Change from pixel coordinate to grid coordinate\n x2 = xf * cam_grid_ratio[0]\n y2 = gh - yf * cam_grid_ratio[1]\n\n frame = frame[:, :, ::-1]\n\n return x2, y2, xf, yf, frame", "def printTape(self):\n print(self.loadedTape.tape)", "def output_angles(frame, analysis_dict, reference):\n y_pos = 20\n for key, value in analysis_dict.items():\n if key in reference.keys():\n text = \"{}: Angle = {:.2f}, Diff = {:.2f}\".format(key, value, value - reference[key])\n cv2.putText(frame, text, (0, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2,\n cv2.LINE_AA)\n cv2.putText(frame, text, (0, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 1,\n cv2.LINE_AA)\n y_pos += 20\n return frame", "def test_calculate_tilt(tilt_reference):\n 
tilt_rad = np.radians(tilt_reference)\n # get the rotation axis\n # NOTE:\n # we need to tilt the image -tilt in order to get the tilt angle as + value.\n rot_aixs_tilted = get_tilted_rot_axis(tilt_inplane=-tilt_rad, tilt_outplane=0.0)\n # radiograph at 0 deg\n img0 = virtual_cam(two_sphere_system(0, rot_aixs_tilted, size=200))\n # radiograph at 180 deg\n img180 = virtual_cam(two_sphere_system(np.pi, rot_aixs_tilted, size=200))\n # calculate the tilt angle\n tilt_angle = calculate_tilt(img0, img180).x\n # verify\n # NOTE: tolerance is set to half a pixel at the edge of the FOV\n np.testing.assert_allclose(tilt_angle, tilt_reference, atol=np.degrees(0.5 / 100))", "def print_screen(history):\n\n # Extract the angles from the distance data. Usually this won't change \n # from scan to scan, but there are scanning algorithms where that may not \n # be the case \n angles = []\n for h in history:\n angles = list(set(angles + h.keys())) \n angles.sort()\n\n # Create a 2D grid of characters. Essentially a \"screen buffer\"\n buff = {}\n for angle in angles:\n buff[angle] = ' '.rjust(120)\n\n \n blips = ['.', '*', '#', '@']\n blips = blips[-len(history):] # if we only have 2, take last 2 blips\n\n # Plot blips onto buffer \n for h in history:\n blip = blips.pop(0) if len(blips) else '.'\n for angle in angles:\n if angle not in h: continue\n dist = h[angle]\n if dist < 120:\n buff[angle] = set_char_at(buff[angle], dist, blip)\n\n # Output\n print \"\\n\\n\\n\"\n for angle in angles:\n obstacle = 'x' if '@' in buff[angle][0:30] else ' '\n print \"%s %s %s\" % (str(angle).rjust(5), obstacle, buff[angle])\n print '20cm'.rjust(30) + '50cm'.rjust(30) + '1m'.rjust(50)", "def snapFrame(camera):\n return camera.read()[1]", "def make_frame(t):\n mlab.view(azimuth= 100*t/duration, distance=100) # roll camera angle\n f = mlab.gcf()\n f.scene._lift()\n return mlab.screenshot(antialiased=True) # return a RGB image", "def display_frame(self, frame=None):\n if frame is None:\n frame = self.get_frame()\n cv2.namedWindow('frame')\n cv2.imshow('frame', frame)\n cv2.waitKey(0)", "def trace_display_test(tracker_record, obj_id='0', img_path=None):\n BBox, BFrame = zip(*tracker_record[obj_id]['list'])\n Center_BBox = get_box_center(BBox)\n print(Center_BBox)\n img = cv2.imread(img_path)\n for elem in Center_BBox:\n print(elem)\n cv2.circle(img, (int(elem[0]), int(elem[1])), 3, (0, 0, 255), 3)\n cv2.namedWindow('img_1', cv2.WINDOW_NORMAL)\n cv2.imshow('img_1', img)\n cv2.waitKey()", "def video_stabilizer(self, video=None):\r\n def in_roi(roi, p):\r\n x, y = p\r\n return roi['x1'] < x < roi['x2'] and roi['y1'] < y < roi['y2']\r\n\r\n \r\n if video is None:\r\n video = self.video_buffer\r\n stab_video = np.zeros_like(video)\r\n roi = self.get_roi(video=video, window_name='Draw ROI to stabilize the video around it')\r\n\r\n # params for ShiTomasi corner detection\r\n feature_params = dict(maxCorners=800,\r\n qualityLevel=0.01,\r\n minDistance=3,\r\n blockSize=3)\r\n \r\n # Parameters for lucas kanade optical flow\r\n lk_params = dict(winSize=(15, 15),\r\n maxLevel=8,\r\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\r\n \r\n m_dx, m_dy = 0, 0\r\n \r\n # Take first frame and find corners in it\r\n old_frame = video[0]\r\n \r\n rows, cols, depth = old_frame.shape\r\n old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\r\n \r\n p0 = cv2.goodFeaturesToTrack(old_gray, \r\n mask=None, \r\n **feature_params)\r\n p0 = np.expand_dims([p for p in p0.squeeze() if in_roi(roi, p)], 1)# p0.copy()\r\n \r\n for idx 
in tqdm(range(len(video))):\r\n \r\n # Get next frame\r\n frame = video[idx]\r\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n \r\n # calculate optical flow\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\r\n \r\n # Select good points\r\n try:\r\n good_cur = p1[np.where(st == 1)]\r\n good_old = p0[np.where(st == 1)]\r\n except TypeError as e:\r\n print('TypeError, no good points are avaliabole, error: {0}'.format(e))\r\n print('Exit video stabilizer at frame {0} out of {1}'.format(idx, self.length))\r\n break\r\n \r\n dx = []\r\n dy = [] \r\n \r\n # Draw points and calculate\r\n for i, (cur, old) in enumerate(zip(good_cur, good_old)):\r\n a, b = cur.ravel()\r\n c, d = old.ravel()\r\n dx.append(c - a)\r\n dy.append(d - b)\r\n \r\n m_dx += np.mean(dx)\r\n m_dy += np.mean(dy)\r\n print(m_dx,m_dy)\r\n \r\n M = np.float32([[1, 0, m_dx], [0, 1, m_dy]])\r\n \r\n stab_video[idx] = cv2.warpAffine(frame, M, (cols, rows), \r\n cv2.INTER_NEAREST|cv2.WARP_INVERSE_MAP, \r\n cv2.BORDER_CONSTANT).copy()\r\n\r\n marked = stab_video[idx].copy()\r\n for p in np.squeeze(good_cur):\r\n marked = cv2.circle(marked, tuple(p.astype(int)), 5, (255,0,0), 2)\r\n cv2.imshow('stab', marked)\r\n cv2.waitKey(0)\r\n\r\n\r\n\r\n # Update the previous frame and previous points\r\n old_gray = frame_gray.copy()\r\n p0 = good_cur.reshape(-1, 1, 2)\r\n cv2.destroyAllWindows()\r\n return stab_video", "def make_frame(t):\r\n duration = 6 # duration of the animation in seconds\r\n mlab.view(azimuth=360 * t / duration) # camera angle\r\n\r\n return mlab.screenshot(antialiased=True) # return a RGB image\r", "def analyze_movie(\n video_path, aspect_ratio=0, palette_size=32, frames=-1, step=1, show_frames=False, show_last_frame=False, color_format='hex'\n):\n\n # Parse video frame-by-frame\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n pil_img = None\n count = 0\n while success and frames == -1 or count < frames:\n if count % step == 0:\n # Convert to PIL image\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(img)\n\n # Crop frame to remove border\n if aspect_ratio != 0:\n width, height = pil_img.size\n left = 0\n right = width\n content_height = 1/aspect_ratio * width\n border = (height - content_height) * 0.5\n top = border\n bottom = border + content_height\n pil_img = pil_img.crop((left, top, right, bottom))\n\n # Get primary color\n main_color = get_primary_color(\n pil_img, palette_size, show_img=show_frames)\n\n if color_format == 'hex':\n main_color = rgbToHex(main_color)\n \n print(main_color)\n\n # Attempt to read next frame\n success, image = vidcap.read()\n count += 1\n\n if show_last_frame:\n pil_img.show()", "def process_frame(self, raw_frame, detection_all):\n \n frame = raw_frame.copy()\n h,w,_ = frame.shape\n proximity = int(h/2.4)\n pic_proximity = int(h/1.45)\n min_distance = int(w/2)\n\n head_pos = (detection_all[4][0], detection_all[4][1])\n hand_pos = (detection_all[3][0], detection_all[3][1])\n \n \n #self.target_height = target_height\n self.target_height = detection_all[4][3]\n target_width = detection_all[4][2]\n target = (detection_all[4][0], detection_all[4][1])\n\n ref_x = int(w/2)\n ref_y = int(h*0.35)\n \n self.axis_speed = self.cmd_axis_speed.copy()\n\n #Is there a Picture Command ?\n if self.picture_approach:\n cls_number = int(self.classes_dict[self.picture_target])\n print (str(self.picture_target) + 'is' + str(cls_number))\n print (self.picture_target + ' values:' + 
str(detection_all[cls_number]))\n \n # If no pic target found --> rotate\n if (detection_all[cls_number][0] + detection_all[cls_number][1]) == 0:\n \n log.info(f'searching for {self.picture_target}')\n \n if time.time() - self.search_start_time < 8: \n self.axis_speed[\"yaw\"] = 60\n else:\n print('stopped searching after 8 seconds')\n self.axis_speed[\"yaw\"] = 0\n self.picture_approach = False\n \n # If pic target found set as new tracking target\n else:\n print('pic target found')\n self.axis_speed[\"yaw\"] = 0\n if self.timestamp_pic_target_found is None:\n self.timestamp_pic_target_found = time.time()\n\n log.info(f'found {self.picture_target}')\n target = (detection_all[cls_number][0], detection_all[cls_number][1])\n self.target_height = detection_all[cls_number][3]\n \n #If Human Head:\n if cls_number == 4:\n self.keep_distance = pic_proximity*0.75\n else:\n self.keep_distance = pic_proximity\n\n self.pid_pitch = PID(0.15,0.0,0.1,setpoint=0,output_limits=(-30,30))\n self.tracking = True\n \n # If voice command 'come home' activate RTH\n if self.rth:\n self.target_height = detection_all[4][3]\n target_width = detection_all[4][2]\n target = (detection_all[4][0], detection_all[4][1])\n self.keep_distance = proximity*0.75\n self.toggle_tracking(tracking=True)\n\n if self.timestamp_take_picture:\n if time.time() - self.timestamp_take_picture > 2:\n self.timestamp_take_picture = None\n self.drone.take_picture()\n else:\n\n if self.tracking: \n if target != (0,0): \n if self.distance_mode: \n # Locked distance mode\n if self.keep_distance is None:\n self.keep_distance = self.target_height\n #self.graph_distance = RollingGraph(window_name=\"Distance\", y_max=500, threshold=self.keep_distance, waitKey=False)\n \n if self.palm_landing_approach:\n self.keep_distance = proximity\n self.timestamp_keep_distance = time.time()\n log.info(\"APPROACHING on pose\")\n self.pid_pitch = PID(0.2,0.0,0.1,setpoint=0,output_limits=(-30,30))\n #self.graph_distance = RollingGraph(window_name=\"Distance\", y_max=500, threshold=self.keep_distance, waitKey=False)\n\n self.body_in_prev_frame = True\n \n xoff = int(target[0]-ref_x)\n yoff = int(ref_y-target[1])\n\n #We draw an arrow from the reference point to the head we are targeting \n color = (0,0,255)\n cv2.circle(frame, (ref_x, ref_y), 10, color, 1,cv2.LINE_AA)\n cv2.line(frame, (ref_x, ref_y), target, color, 4)\n cv2.rectangle(frame, (target[0]-target_width//2, target[1]-self.target_height//2), \n (target[0]+target_width//2, target[1]+self.target_height//2),color,4)\n \n # The PID controllers calculate the new speeds for yaw and throttle\n self.axis_speed[\"yaw\"] = int(-self.pid_yaw(xoff))\n #log.debug(f\"xoff: {xoff} - speed_yaw: {self.axis_speed['yaw']}\")\n self.last_rotation_is_cw = self.axis_speed[\"yaw\"] > 0\n\n self.axis_speed[\"throttle\"] = int(-self.pid_throttle(yoff))\n #log.debug(f\"yoff: {yoff} - speed_throttle: {self.axis_speed['throttle']}\")\n\n #If in locked distance mode\n if self.keep_distance and self.target_height: \n \n # Check RTH\n if self.rth and self.target_height>=self.keep_distance:\n self.rth = False\n \n elif self.palm_landing_approach and self.target_height>self.keep_distance:\n # The drone is now close enough to the body\n # Let's do the palm landing\n log.info(\"PALM LANDING after approaching\")\n self.palm_landing_approach = False\n self.toggle_tracking(tracking=False)\n self.palm_land() \n \n elif self.picture_approach and \\\n abs(self.target_height-self.keep_distance) < 15 and \\\n xoff < 12 and yoff < 15:\n \n # The 
drone is now close enough to the pic target\n # Let's take a picture \n self.toggle_tracking(tracking=False)\n print('take a picture')\n self.drone.take_picture()\n self.picture_approach = False\n self.timestamp_pic_target_found = None\n self.pid_pitch = PID(0.3,0.0,0.1,setpoint=0,output_limits=(-70,70))\n \n \n else:\n self.axis_speed[\"pitch\"] = int(self.pid_pitch(self.target_height-self.keep_distance))\n log.debug(f\"Target distance: {self.keep_distance} - cur: {self.target_height} -speed_pitch: {self.axis_speed['pitch']}\")\n \n if abs(head_pos[1] - hand_pos[1])<30:\n if self.timestamp_hand_ctrl is None:\n self.timestamp_hand_ctrl = time.time()\n if time.time() - self.timestamp_hand_ctrl > 1:\n if self.head_hand_x_dist is None:\n self.head_hand_x_ref = head_pos[0]-hand_pos[0]\n \n self.hand_ctrl = True\n self.head_hand_x_dist = head_pos[0]-hand_pos[0]\n self.axis_speed[\"roll\"] = int(-self.pid_roll(self.head_hand_x_ref - self.head_hand_x_dist))\n #print (f'head hand X distance: {abs(head_pos[0]-hand_pos[0])}')\n\n else:\n self.hand_ctrl = False\n self.timestamp_hand_ctrl = None\n self.head_hand_x_dist = None\n\n else: # Tracking but no body detected\n if self.body_in_prev_frame:\n self.timestamp_no_body = time.time()\n self.body_in_prev_frame = False\n self.axis_speed[\"throttle\"] = self.prev_axis_speed[\"throttle\"]\n self.axis_speed[\"yaw\"] = self.prev_axis_speed[\"yaw\"]\n else:\n if time.time() - self.timestamp_no_body < 1:\n print(\"NO BODY SINCE < 1\", self.axis_speed, self.prev_axis_speed)\n self.axis_speed[\"throttle\"] = self.prev_axis_speed[\"throttle\"]\n self.axis_speed[\"yaw\"] = self.prev_axis_speed[\"yaw\"]\n else:\n log.debug(\"NO BODY detected for 1s -> rotate\")\n self.axis_speed[\"yaw\"] = self.def_speed[\"yaw\"] * (1 if self.last_rotation_is_cw else -1)\n \n\n # Send axis commands to the drone\n for axis, command in self.axis_command.items():\n if self.axis_speed[axis]is not None and self.axis_speed[axis] != self.prev_axis_speed[axis]:\n #log.debug(f\"COMMAND {axis} : {self.axis_speed[axis]}\")\n command(self.axis_speed[axis])\n self.prev_axis_speed[axis] = self.axis_speed[axis]\n else:\n # This line is necessary to display current values in 'self.write_hud'\n self.axis_speed[axis] = self.prev_axis_speed[axis]\n \n # Write the HUD on the frame\n frame = self.write_hud(frame)\n\n return frame", "def track_video(frames):\n first_frame = next(frames)\n lkt = lktrack.LKTracker(first_frame)\n lkt.detect_points()\n yield lkt.draw()\n\n for im in frames:\n lkt.step(im)\n lkt.track_points()\n yield lkt.draw()", "def plot_frame(tpf: TessTargetPixelFile, aperture=None, ax=None, savefn=None, frame=200, show_colorbar=True, **kwargs):\n if not ax:\n ax = plt.subplot(projection=tpf.wcs)\n # Set default plotting args\n kwargs['interpolation'] = 'nearest'\n kwargs['cmap'] = 'hot'\n kwargs['scale'] = 'sqrt'\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n tpf.plot(ax=ax, frame=frame, show_colorbar=show_colorbar, aperture_mask=aperture, **kwargs)\n with plt.style.context(MPLSTYLE):\n ax.coords[0].set_axislabel('Right Ascension')\n ax.coords[1].set_axislabel('Declination')\n # IF want to save\n if savefn:\n plt.gcf().savefig(savefn)\n return ax" ]
[ "0.6334015", "0.57378316", "0.5471469", "0.5451941", "0.54345995", "0.5249486", "0.5228421", "0.5206411", "0.5178842", "0.5156065", "0.51413107", "0.5130146", "0.5097734", "0.5094007", "0.50803936", "0.50718665", "0.5069257", "0.5052582", "0.50398207", "0.50334924", "0.50265783", "0.50162905", "0.50058734", "0.49928313", "0.4991558", "0.49579746", "0.49486223", "0.49371496", "0.49312997", "0.49246335" ]
0.67964447
0
Takes 3 arguments and creates an incremental list of numbers.
def createNumList(start, end, increment): start = int(start) end = int(end) increment = int(increment) numbers = [] while start < end: print(f"Loop: {start}.") numbers.append(start) start += increment print("Numbers:\n{}".format(numbers)) print(f"At the bottom i is {start}\n") return numbers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def three_times_nums(num_list):", "def crange(*args):\r\n result = [[]]\r\n for arg in args:\r\n result = [x + [y] for x in result for y in range(arg)]\r\n return result", "def build_numeric_sequence(data: List[int]) -> List[str]:\n prev = -1\n start = None\n res = []\n for item in filter(None, sorted(set(data))):\n if prev + 1 == item:\n if not start:\n start = prev\n if res and res[-1] == prev:\n res.pop()\n else:\n if start:\n res.append('{}:{}'.format(start, prev))\n start = None\n res.append(item)\n prev = item\n if start:\n res.append('{}:{}'.format(start, prev))\n return [str(item) for item in res]", "def prepare_numbers(numbers):\n \n numb = []\n for item in numbers:\n numb.append(int(item))\n return numb", "def double_nums(num_list):", "def add_numbers(first_number, second_number):", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]", "def number_list(number, existing_number_list):\n\n\texisting_number_list.append(number)\n\treturn existing_number_list", "def append(self, numbers):\n pre = self.elements[:-3:-1]\n '''concatenate last two elements and new list'''\n pro_list = pre+numbers\n if len(pro_list) >= 3:\n '''for n number there will be n-2 totals'''\n for i in range(len(pro_list)-2):\n self.total.append(sum(pro_list[i:i+3]))\n self.elements = self.elements + numbers", "def parseNumList(input):\n\tm = re.match(r'(\\d+)(?:-(\\d+))?(?:-(\\d+))?$', input)\n\t# ^ (or use .split('-'). anyway you like.)\n\tif not m:\n\t\traise ArgumentTypeError(\"'\" + input + \"' is not a range of number. 
Expected forms like '1-5' or '2' or '10-15-2'.\")\n\tstart = int(m.group(1))\n\tend = int(m.group(2))\n\tif m.group(3):\n\t\tincrement = int(m.group(3))\n\telse:\n\t\tincrement = 1\n\treturn list(range(start, end+1, increment))", "def generate_list(start: int, stop: int, step: int = 1) -> List[int]:\n # if start == stop:\n # print(start)\n # else:\n # res = []\n # while start < (stop + 1):\n # res.append(start)\n # start += step\n # print(res)\n\n return [item for item in range(start, (stop+step))]", "def make_list():\n num_list = []\n tries = 3\n for i in range(tries):\n number = get_input()\n try:\n number = int(number)\n except ValueError:\n print(\"Numbers Only.\")\n else:\n num_list[len(num_list):] = [number]\n return num_list", "def add_list_numbers(incoming_list: list):\n return sum(incoming_list)", "def populate_lists_loop() -> list:\r\n n = int(input(\"Enter the amount of desired lists: \"))\r\n total_ls = []\r\n for _ in range(n):\r\n ls = input(\r\n f\"Enter element for the list number {_+1} each seperated with comma: \"\r\n )\r\n ls = list(map(int, ls.split(\",\")))\r\n total_ls.append(ls)\r\n return total_ls", "def init_list(no_elements):\n\ti = 0\n\tnumbers\t= []\n\twhile i < no_elements:\n\t\tnumbers.append(i)\n\n\t\ti += 1\n\n\t# return initialized array\n\treturn numbers", "def three_sum(nums: List[int]) -> List[List[int]]:\n\n size = len(nums)\n res = list()\n if size < 3:\n return res\n #\n\n nums.sort()\n seen_dict = dict()\n for i in range(size - 2):\n num = nums[i]\n if num in seen_dict:\n continue\n #\n seen_dict[num] = 1\n two_sum_res = two_sum(nums, i + 1, -num)\n for item in two_sum_res:\n item.append(num)\n item.sort()\n res.append(item)\n #\n #\n return res", "def range() -> List[int]:\n pass", "def init_numbers():\n return ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')", "def sort_012(input_list):\n aux_dict = {}\n\n for i in input_list:\n if type(i) is not int or not 0 <= i <= 2:\n return \"Invalid argument\"\n\n if i not in aux_dict:\n aux_dict[i] = 0\n aux_dict[i] += 1\n \n return ([0] * aux_dict[0] if 0 in aux_dict else []) + \\\n ([1] * aux_dict[1] if 1 in aux_dict else []) + \\\n ([2] * aux_dict[2] if 2 in aux_dict else [])", "def create_range():\n limit1, limit2 = (int(num) for num in input(\"Please, specify range limits using space: \").split())\n nums_in_range = []\n for _ in range(limit1, limit2 + 1):\n nums_in_range.append(int(_))\n return limit1, limit2, nums_in_range", "def sequence(side_length):\r\n index = side_length\r\n numbers = []\r\n tmp1 = (index -1 ) / 2\r\n #numbers.append([index, 3, 5, 7, 9])\r\n for i in range(tmp1):\r\n if i == 0:\r\n numbers.append([3, 3, 5, 7, 9])\r\n else:\r\n diff = (3+i*2) - 1\r\n tmp2 = numbers[i-1][4] + diff\r\n numbers.append([3+i*2, tmp2, tmp2+diff, tmp2+diff*2, tmp2+diff*3])\r\n return numbers", "def make_executions(numbers):\n executions = []\n _numbers = numbers.copy()\n orig_len = len(numbers)\n for i in range(len(_numbers)):\n print(f\"Nbs left == {len(_numbers)} / {orig_len}\")\n executions += splice(_numbers)\n _numbers = _numbers[:-1]\n return executions", "def list_nums():\n max_num = int(input(\"What is the maximum number you would like to use?\"))\n num_list = []\n for x in range(2, max_num + 1):\n num_list.append(x)\n return num_list", "def renumerate(arr):\n return zip(reversed(range(len(arr))), reversed(arr))", "def subtraction_of(number_list):", "def number_list(l):\n return ['{i:>{s}}. 
{v}'.format(s=len(str(len(l))), i=i+1, v=l[i]) for i in range(len(l))]", "def rec_increment(p):\n if p == []:\n return []\n t = p.pop()\n return [t+1] + rec_increment(p)", "def add_position_recur(lst, number_from=0):\r\n # base case empty list returns the empty list\r\n if lst == []:\r\n return []\r\n\r\n else:\r\n initial_value = lst[0]\r\n return [initial_value + number_from] + \\\r\n add_position_recur(lst[1:], number_from + 1)" ]
[ "0.70656544", "0.64583635", "0.62111646", "0.6192229", "0.61834216", "0.6169007", "0.60984915", "0.60727", "0.60704976", "0.60547554", "0.6032393", "0.60077435", "0.5981581", "0.5950276", "0.59328985", "0.5851967", "0.58273876", "0.57553345", "0.57294756", "0.57179105", "0.5708004", "0.57063687", "0.5696453", "0.5675122", "0.5665779", "0.56622833", "0.5652089", "0.56489295", "0.5600914", "0.55929506" ]
0.6813803
1
Takes 3 arguments and creates an incremental list of characters.
def creatCharList(start, end, increment): start = int(start) end = int(end) increment = int(increment) characters = [] i = start for i in range(start, end, increment): print(f"Loop: {i}.") characters.append(chr(i)) print("Characters:\n{}".format(characters)) print(f"At the bottom i is {i}\n") return characters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_chars(length, character):\n return ''.join([character for i in range(length)])", "def letters_generator():\n def multiletters(seq):\n for n in itertools.count(1):\n for s in itertools.product(seq, repeat=n):\n yield \"\".join(s)\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return multiletters(letters)", "def generate_strings(char_list, length):\n if length <= 0:\n yield []\n elif length == 1:\n for char in char_list:\n yield [char]\n else:\n for char in char_list:\n for l in generate_strings(char_list, length-1):\n yield [char] + l", "def generate_n_chars(x,y):\n s = \"\" #set up a new string\n for i in range(x):#number that character need to repeat\n s = s+y\n return s#returns a string", "def genCharGroup(self):\n alphabet = list('abcdefghijklmnopqrstuvwxyz') #Creates a list of all the alphabet characters\n group = []\n count = 0\n while count != 3: #While the loop total does not equal 3\n i = random.choice(alphabet) #Make a random choice\n alphabet.remove(i) #Remove it from the alphabet\n group.append(i) #And add it to the group array\n count += 1 #Add one to the loop total\n return str(''.join(group)) #Return the string of 3 characters to the user", "def allstrings2(alphabet, length):\n\n c = []\n for i in range(length):\n c = [[x]+y for x in alphabet for y in c or [[]]]\n\n return c", "def generate_string_list(char_list, length):\n return [e for e in generate_strings(char_list, length)]", "def generate_alphabet_combinations(length: int = 2) -> List[str]:\n assert length > 0\n alphabets = string.ascii_lowercase\n\n return [\n ''.join(combination)\n for n in range(1, length+1)\n for combination in product(alphabets, repeat=n)\n ]", "def any_of_chars(*args:List[str]) -> str:\n # TODO uniq\n chars = \"\".join(args)\n return f\"[{chars}]\"", "def c_chars(x):\r\n return (c_char * len(x))(*x)", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def generateNchars(inputChar, inputNum):\n return inputChar * int(inputNum)", "def alphabator(lst):\r\n n = 0\r\n for item in lst:\r\n n += 1\r\n if isinstance(item, int) and 1 <= item <= 26:\r\n item = chr(64 + item)\r\n yield item", "def create_character() -> list:\n return [0, 0]", "def alpha_chars_pairs (text):\n alpha_text = list (alpha_chars (text))\n return itertools.combinations (alpha_text)", "def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def n_char_generate(self,char,n):\n return char*n", "def examples_from_characters(chars: list[str], num_prev_chars: int) -> list[Example]:\n # TODO: (~6-7 lines) implement here :)\n examples = []\n for i in range(len(chars) - num_prev_chars):\n examples.append({\"text\": chars[i:i + num_prev_chars],\"target\": chars[i + num_prev_chars]})\n return examples", "def lettergen():\n for repeat in range(1, 10):\n for item in itertools.product(ascii_uppercase, repeat=repeat):\n yield \"\".join(item)", "def contsrep(contents, n=9):\n cs_rep = []\n for c in contents:\n if isinstance(c, Character) and c.code in printable_ascii_codes:\n c_str = chr(c.code)\n if cs_rep and isinstance(cs_rep[-1], str):\n cs_rep[-1] += c_str\n else:\n cs_rep.append(c_str)\n else:\n cs_rep.append(c)\n return truncate_list(cs_rep, n=n)", "def 
get_combo(starting_letter, length): # Apparently ngrams beyond bigrams only have two letter file names. Still keeping this for generality, but should always be run with length=2 in this context\n alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n combos = list(itertools.combinations(alpha, length - 1))\n combos = [starting_letter + ''.join(item) for item in combos]\n\n return combos", "def set_of_initials(i=3):\r\n\r\n return [''.join(_random.choice(string.ascii_uppercase) + '.'\r\n for x in xrange(i))]", "def init_letters():\n return ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',\n 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z')", "def create_word(char_list):", "def gen_chars(self, lines_str_list):\n char_index_counter = 0\n chars = VGroup()\n for line_no in range(lines_str_list.__len__()):\n chars.add(VGroup())\n chars[line_no].add(\n *self.lines_text.chars[\n char_index_counter : char_index_counter\n + lines_str_list[line_no].__len__()\n + 1\n ]\n )\n char_index_counter += lines_str_list[line_no].__len__() + 1\n return chars", "def allstrings2(alphabet, length):\n c = []\n for i in range(length):\n c = [[x]+y for x in alphabet for y in c or [[]]]\n for value in c:\n \tfvalue = ''.join(value)\n \tprint fvalue\n return \"\"", "def characters():\n\n letter = \"a b c d e f g h i j k l m n o p q r s t u v w x y z\".split()\n sc = \"! @ # $ % ^ & * ( ) _ - + = ? : ;\".split()\n\n\n chars = []\n chars.append(random.choice(letter))\n chars.append(random.choice(letter).upper())\n chars.append(str(random.randint(0,9)))\n chars.append(random.choice(sc))\n\n return chars", "def char_range(c1, c2):\n for c in range(ord(c1), ord(c2)+1):\n yield c, chr(c)", "def str_to_c(cmd_x,lenth):\n i = 0\n cmd_r = [0 for x in range(lenth)]\n while (i<lenth):\n cmd_r[i]=ord(cmd_x[i])\n i+=1\n return cmd_r", "def str_to_c(cmd_x,lenth):\n i = 0\n cmd_r = [0 for x in range(lenth)]\n while (i<lenth):\n cmd_r[i]=ord(cmd_x[i])\n i+=1\n return cmd_r" ]
[ "0.6578999", "0.61975336", "0.61801773", "0.61747915", "0.61521375", "0.6133547", "0.6093438", "0.60465837", "0.59642506", "0.591356", "0.58994937", "0.58944756", "0.58723474", "0.585621", "0.58508706", "0.58382934", "0.58133805", "0.5773641", "0.5747958", "0.5738145", "0.5731326", "0.57295096", "0.57008976", "0.5698658", "0.5600892", "0.5587081", "0.5584521", "0.5579716", "0.55698407", "0.55698407" ]
0.6889811
0
Finds the divisors of x. Assumes that x is a positive integer. Returns a tuple containing the divisors of x.
def finddiv(x): div = (1, x) for i in range(2, x//2+1): if x%i==0: div+=(i,) return div
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def divisors(x):\n x = abs(x)\n result = []\n upper_bound = int(math.sqrt(x))\n for i in range(1, upper_bound + 1):\n if x % i == 0:\n if x / i == i:\n result.append(i)\n else:\n result.append(i)\n result.append(x//i)\n return sorted(distinct(result))", "def proper_divisors(x):\n return divisors(x)[:-1]", "def restricted_divisors(x):\n return divisors(x)[1:-1]", "def divisors(n):\n return tuple(_divisor_gen(n))", "def getDivisors(n):", "def findDivisors(n1, n2):\n divisors = () # the empty tuple\n for i in range(1, min(n1, n2) + 1):\n if n1%i == 0 and n2%i == 0:\n divisors = divisors + (i,)\n return divisors", "def findDivisors(num1, num2):\n divisors = (1,)\n for i in range(2, (min(num1, num2) + 1)):\n if num1 % i == 0 and num2 % i == 0:\n divisors += (i,)\n return divisors", "def divisors(number: int) -> Set[int]:\n\n if number == 0:\n return {0}\n divisor = 2\n while divisor * divisor <= number:\n if number % divisor == 0:\n smaller_result = divisors(number // divisor)\n multiplied_result = {d * divisor for d in smaller_result}\n\n return smaller_result | multiplied_result\n divisor = divisor + 1\n\n return {1, number}", "def find_divisors(n: int) -> Set[int]:\n divisors = {1, n}\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n // i)\n return divisors", "def find_divisors(n):\n\n\tpd = [1]\n\n\tsqrtN = int(math.sqrt(n))\n\n\tfor d in range(2, sqrtN+1):\n\t\tif n % d == 0:\n\t\t\tpd.append(d)\n\t\t\tpair = int(n/d)\n\t\t\tif not pair == d:\n\t\t\t\tpd.append(pair)\n\n\treturn pd", "def find_divisors_2(number):\n divisors = [n for n in range(1, number) if number % n == 0]\n return divisors", "def get_divisors(num):\n assert num != 0, \"Num is 0\"\n divisors = []\n sq_root = int(num**0.5)\n for i in range(1, sq_root + 1):\n if num % i == 0:\n divisors.extend([i, num // i])\n # if num has a perfect sq, that number will be added twice, then:\n if sq_root ** 2 == num:\n divisors.remove(sq_root)\n return divisors", "def get_divisors(n):\n n = abs(n)\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n%i == 0:\n divisors.append(i)\n divisors.append(-i)\n if i*i != n:\n divisors.append(n//i)\n divisors.append(-n//i)\n return sorted(divisors, key=abs)", "def div(a, x):\n return [a[i]/x for i in range(2)]", "def proper_divisors(n: int) -> [int]:\n\n if n == 1:\n return []\n\n x = 2\n divisors = set([1])\n while x * x <= n and n > 1:\n if n % x == 0:\n divisors.add(x)\n divisors.add(n // x)\n\n x += 1\n\n s = sorted(divisors)\n return s", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def simple_get_divisors(num: int) -> list:\n all_divisors = []\n for possible_divisor in range(1, math.floor(num / 2) + 1):\n if num % possible_divisor == 0:\n all_divisors.append(possible_divisor)\n return all_divisors", "def find_divisors_1(number):\n divisors = []\n # Test all numbers from 1 to number-1.\n # Actually, we can be more efficient with range(1, (number//2)+1)\n for n in range(1, number): \n if number % n == 0:\n divisors.append(n)\n return divisors", "def divisors(n):\n d = []\n for i in range(1, int(math.sqrt(n) + 1)):\n if n % i == 0:\n d.append(i)\n d.append(n / i)\n return set(d)", "def divisors(n):\n dvs = []\n for i in range(1, int(math.sqrt(n)) + 1):\n if n % i == 0:\n dvs.append(i)\n j = n / i\n if j != i:\n dvs.append(j)\n\n dvs.remove(n)\n return dvs", "def findDivisor(num):\n divisors = [1]\n for i in range(2, int(sqrt(num)) + 1):\n if num % i == 0:\n divisors.append(i)\n temp = num / i\n if temp != i:\n divisors.append(temp)\n 
return divisors", "def divisors(n: int) -> list:\n # iterate through every number <= n/2 and check whether the number is a divisor\n # append to list if not in list\n # in the end, append the number\n divs = [n]\n for i in range(1, n//2 + 1):\n if n % i == 0:\n divs.append(i)\n return divs", "def divisors(n):\n return [x for x in range(1, n) if n % x == 0]", "def _find_dividers(num: int) -> List[int]:\r\n\r\n dividers: List[int] = list()\r\n while num != 1:\r\n primes = PrimeHandler.find_all_primes(num)\r\n for prime in reversed(primes):\r\n if num % prime == 0:\r\n dividers.append(prime)\r\n num = num // prime\r\n break\r\n return list(reversed(dividers))", "def divisors(decomp):\n combine = lambda acc, p: set(a * (p ** e) for a in acc for e in xrange(decomp[p] + 1))\n return reduce(combine, decomp, {1})", "def find_divisors(integer):\n\n divisors = []\n # we know that an integer divides itself\n divisors.append(integer)\n # we also know that the biggest divisor other than the integer itself\n # must be at most half the value of the integer (think about it)\n divisor = integer / 2\n\n while divisor >= 0:\n if is_divisible(integer, divisor):\n divisors.append(divisor)\n divisor =- 1\n\n return divisors", "def div(x, y):\n return x / y", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def proper_divisors(n):\n divisors = set([1])\n for i in range(2, int(ceil(sqrt(n)))+1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n/i)\n return divisors", "def divisors(N):\n # Initialize the list of divisors\n divisor_list = [1]\n # Check division by d for d <= N/2\n for d in range(2,N // 2 + 1):\n if N % d == 0:\n divisor_list.append(d)\n divisor_list.append(N)\n return divisor_list" ]
[ "0.8460022", "0.7699094", "0.7683258", "0.7621047", "0.7449918", "0.7439679", "0.7402586", "0.73635805", "0.73439324", "0.7196147", "0.71849275", "0.7158595", "0.71131396", "0.7095032", "0.7063125", "0.7036525", "0.70283794", "0.69887", "0.69874907", "0.69815105", "0.6960848", "0.69456506", "0.69359", "0.683036", "0.68243897", "0.6773449", "0.67579323", "0.67500716", "0.67435455", "0.67093045" ]
0.8262666
1
Test for MesoscopePreprocess.get_default_tau method.
def test_get_default_tau(self):
    subject_detail = {'genotype': [{'allele': 'Cdh23', 'zygosity': 1},
                                   {'allele': 'Ai95-G6f', 'zygosity': 1},
                                   {'allele': 'Camk2a-tTa', 'zygosity': 1}]}
    with mock.patch.object(self.task.one.alyx, 'rest', return_value=subject_detail):
        self.assertEqual(self.task.get_default_tau(), .7)
        subject_detail['genotype'].pop(1)
        self.assertEqual(self.task.get_default_tau(), 1.5)  # return the default value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDefault():", "def test_change_default_dt_static(self):\n ct.set_defaults('control', default_dt=0)\n assert ct.tf(1, 1).dt is None\n assert ct.ss([], [], [], 1).dt is None", "def default_tune(self):\n return self._default_tune", "def init_tau(self, type: str = 'safest', weight: float = 1.5):\n\n P = self.toeplitz_op.P\n weighted_gram = 2 * self.linear_op.gram\n if self.beta is not None:\n beta = self.beta\n else:\n try:\n beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=self.eig_tol)\n beta *= (1 + self.eig_tol)\n except Exception('Eigs solver did not converge, trying again with small tolerance...'):\n beta = eigs(weighted_gram, k=1, which='LM', return_eigenvectors=False, tol=1e-3)\n beta *= (1 + 1e-3)\n ub = 1 / beta * (1 + 1 / np.sqrt(P + 1))\n lb = 1 / beta * (1 - 1 / np.sqrt(P + 1))\n if type == 'fastest':\n try:\n alpha = eigs(weighted_gram, k=1, which='SM', return_eigenvectors=False, tol=self.eig_tol)\n alpha *= (1 + self.eig_tol)\n except Exception('Eigs solver did not converge. Alpha is set to zero.'):\n alpha = 0\n tau_opt = 2 / (beta + alpha)\n if (tau_opt <= ub) & (tau_opt >= lb):\n self.tau = tau_opt\n else:\n min_lb = np.fmin(np.abs(1 - lb * alpha), np.abs(1 - lb * beta))\n min_ub = np.fmin(np.abs(1 - ub * alpha), np.abs(1 - ub * beta))\n if np.argmin([min_lb, min_ub]) == 0:\n self.tau = lb\n else:\n self.tau = ub\n elif type == 'safest':\n self.tau = 1 / beta\n elif type == 'largest':\n self.tau = ub\n else:\n self.tau = weight / beta", "def __init__(self,tau = 1e-3):\n self._tau = tau \n pass", "def test_get_params():\n\n kwargs = {\n 'population_size': 500,\n 'generations': 1000,\n 'verbosity': 1\n }\n\n tpot_obj = TPOTClassifier(**kwargs)\n\n # Get default parameters of TPOT and merge with our specified parameters\n initializer = inspect.getargspec(TPOTBase.__init__)\n default_kwargs = dict(zip(initializer.args[1:], initializer.defaults))\n default_kwargs.update(kwargs)\n\n assert tpot_obj.get_params() == default_kwargs", "def getdefaulttimeout():\r\n return default_timeout", "def get_default_params() -> Dict:\n default_params = {\n \"n_estimators\": {\n \"default_value\": 100,\n \"description\": \"Number of gradient boosted trees. \"\n \"Equivalent to number of boosting rounds.\",\n \"type\": \"int\"\n },\n \"max_depth\": {\n \"default_value\": 6,\n \"description\": \"Maximum tree depth for base learners.\",\n \"type\": \"int\"\n },\n \"learning_rate\": {\n \"default_value\": 0.3,\n \"description\": \"Boosting learning rate (xgb's 'eta')\",\n \"type\": \"float\"\n },\n \"verbosity\": {\n \"default_value\": 1,\n \"description\": \"The degree of verbosity. Valid values are 0 (silent) - 3 (debug).\",\n \"type\": [0, 1, 2, 3]\n },\n \"booster\": {\n \"default_value\": \"gbtree\",\n \"description\": \"Specify which booster to use: gbtree, gblinear or dart.\",\n \"type\": ['gbtree', 'gblinear', 'dart']\n },\n \"tree_method\": {\n \"default_value\": \"auto\",\n \"description\":\n '''\n Specify which tree method to use. Default to auto. If this parameter\n is set to default, XGBoost will choose the most conservative option\n available. It's recommended to study this option from parameters\n document.\n ''',\n \"type\": [\"auto\", \"exact\", \"approx\", \"hist\", \"gpu_hist\"]\n },\n \"n_jobs\": {\n \"default_value\": 1,\n \"description\": '''\n Number of parallel threads used to run xgboost. When used with other Scikit-Learn\n algorithms like grid search, you may choose which algorithm to parallelize and\n balance the threads. 
Creating thread contention will significantly slow dowm both\n algorithms.\n ''',\n \"type\": \"int\"\n },\n \"gamma\": {\n \"default_value\": 0.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"min_child_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"max_delta_step\": {\n \"default_value\": 0.0,\n \"description\": \"Maximum delta step we allow each tree's weight estimation to be.\",\n \"type\": \"float\"\n },\n \"subsample\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of the training instance.\",\n \"type\": \"float\"\n },\n \"colsample_bytree\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns when constructing each tree.\",\n \"type\": \"float\"\n },\n \"colsample_bylevel\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each level.\",\n \"type\": \"float\"\n },\n \"colsample_bynode\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each split.\",\n \"type\": \"float\"\n },\n \"reg_alpha\": {\n \"default_value\": 0.0,\n \"description\": \"L1 regularization term on weights\",\n \"type\": \"float\"\n },\n \"reg_lambda\": {\n \"default_value\": 0.0,\n \"description\": \"L2 regularization term on weights\",\n \"type\": \"float\"\n },\n \"scale_pos_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Balancing of positive and negative weights.\",\n \"type\": \"float\"\n },\n \"random_state\": {\n \"default_value\": 0,\n \"description\": \"Random number seed.\",\n \"type\": \"int\"\n },\n \"base_score\": {\n \"default_value\": 0.5,\n \"description\": \"The initial prediction score of all instances, global bias.\",\n \"type\": \"float\"\n },\n # \"missing\": {\n # \"default_value\": None,\n # \"description\": \"Value in the data which needs to be present as a missing value.\",\n # \"type\": \"float\"\n # },\n \"num_parallel_tree\": {\n \"default_value\": 1,\n \"description\": \"Used for boosting random forest.\",\n \"type\": \"int\"\n },\n # \"monotone_constraints\": {\n # \"default_value\": \"(0,0)\",\n # \"description\": \" Constraint of variable monotonicity. \"\n # \"See tutorial for more information.\",\n # \"type\": \"str\"\n # },\n # \"interaction_constraints\": {\n # \"default_value\": None,\n # \"description\": '''\n # Constraints for interaction representing permitted interactions. The\n # constraints must be specified in the form of a nest list, e.g. [[0, 1],\n # [2, 3, 4]], where each inner list is a group of indices of features\n # that are allowed to interact with each other. See tutorial for more\n # information\n # ''',\n # \"type\": \"str\"\n # },\n \"importance_type\": {\n \"default_value\": \"gain\",\n \"description\": '''\n The feature importance type for the feature_importances. 
property:\n either \"gain\", \"weight\", \"cover\", \"total_gain\" or \"total_cover\".\n ''',\n \"type\": [\"gain\", \"weight\", \"cover\", \"total_gain\", \"total_cover\"]\n }\n }\n\n return default_params", "def test_no_default_value(self):\n dim = Real(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def test_change_default_dt(self, dt):\n ct.set_defaults('control', default_dt=dt)\n assert ct.ss(1, 0, 0, 1).dt == dt\n assert ct.tf(1, [1, 1]).dt == dt\n nlsys = ct.NonlinearIOSystem(\n lambda t, x, u: u * x * x,\n lambda t, x, u: x, inputs=1, outputs=1)\n assert nlsys.dt == dt", "def test_init_max_time_mins():\n\n tpot_obj = TPOTClassifier(max_time_mins=30, generations=1000)\n\n assert tpot_obj.generations == 1000000\n assert tpot_obj.max_time_mins == 30", "def _default_tcrsampler_human_beta(default_background = None, default_background_if_missing=None):\n from tcrsampler.sampler import TCRsampler\n if default_background is None:\n default_background = 'britanova_human_beta_t_cb.tsv.sampler.tsv'\n \n if default_background_if_missing is None:\n default_background_if_missing ='britanova_human_beta_t_cb.tsv.sampler.tsv.zip'\n \n \n print(default_background)\n\n try: \n t = TCRsampler(default_background=default_background)\n except OSError:\n t = TCRsampler()\n t.download_background_file(default_background_if_missing)\n t = TCRsampler(default_background=default_background)\n return t", "def default_value(behav, param):\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_", "def test_default_objective(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n path = join(self.mount.path, 'initial_placement')\n self.client.mkdirs(path)\n file_names = [join(path, 'basic_objective' + str(file_index))\n for file_index in range(self.FILE_NUMBER)]\n statistics = self.scatter(file_names)\n self.random_verification(statistics, self.FILE_NUMBER)", "def test_no_default_value(self):\n dim = Integer(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def test_default_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"))\n assert s.units is None\n set_default_units(\"SI\")\n s2 = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"))\n assert s2.units == \"SI\"\n set_default_units(\"EE\")\n s3 = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"))\n assert s3.units == \"EE\"\n set_default_units(None)", "def test_no_default_value(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n assert dim.default_value is None", "def default():\n return DefaultTroughPhysicalProcessHeat.default()", "def test_gcp_defaults():\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'gcp-defaults.ini'))\n cfg = ElasticBlastConfig(configure(args), task = ElbCommand.SUBMIT)\n check_common_defaults(cfg)\n\n assert cfg.cloud_provider.cloud == CSP.GCP\n assert cfg.cluster.pd_size == constants.ELB_DFLT_GCP_PD_SIZE\n\n assert cfg.timeouts.blast_k8s == constants.ELB_DFLT_BLAST_K8S_TIMEOUT\n assert cfg.timeouts.init_pv == constants.ELB_DFLT_INIT_PV_TIMEOUT", "def _default_tcrsampler_olga_human_beta(default_background = None, default_background_if_missing=None):\n from tcrsampler.sampler import TCRsampler\n if default_background is None:\n default_background = 'olga_human_beta_t.sampler.tsv'\n 
\n if default_background_if_missing is None:\n default_background_if_missing ='olga_sampler.zip'\n \n print(default_background)\n\n try: \n t = TCRsampler(default_background=default_background)\n except OSError:\n t = TCRsampler()\n t.download_background_file(default_background_if_missing)\n t = TCRsampler(default_background=default_background)\n return t", "def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)", "def test_qpu_default_shots():\n dev = _aws_device(wires=2, shots=None)\n assert dev.shots == AwsDevice.DEFAULT_SHOTS_QPU\n assert not dev.analytic", "def getTau(self) -> float:\n return self.tau", "def tau_ruptura_prem( largo_falla, prof_media, prof_prem, vs_prem ):\n\n dif_profs = np.abs( prof_prem - prof_media )\n idx_prof_media = np.where( dif_profs == dif_profs.min() )\n vs_prof_media = vs_prem[idx_prof_media]\n\n # puede que la profundidad media de la falla se encuentre justo entre 2 valores del modelo\n # si esto sucede, se calcula el promedio de las velocidades de ambos\n if len(vs_prof_media) > 1:\n vs_prof_media = vs_prof_media.mean()\n else:\n vs_prof_media = vs_prof_media\n\n tau = largo_falla/( 0.8 * vs_prof_media )\n\n return tau", "def test_container_get_progress(self):\r\n progress = self.combinedoe_container.max_score()\r\n self.assertEqual(progress, None)", "def get_default(cls):\n raise NotImplementedError", "def default_metric_value(self) -> float:", "def test_default_parameters() -> None:\n mapie = MapieRegressor()\n assert mapie.estimator is None\n assert mapie.method == \"plus\"\n assert mapie.cv is None\n assert not mapie.ensemble\n assert mapie.verbose == 0\n assert mapie.n_jobs is None", "def _getnt(simulation, t=None):\n nt_sim = simulation.nt()\n \n if t is not None:\n \n dummy = np.zeros(nt_sim)\n nt = len2(dummy[t])\n \n else:\n \n nt = nt_sim\n \n return nt", "def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")" ]
[ "0.58054805", "0.5766539", "0.55542696", "0.54653114", "0.53289515", "0.5280533", "0.5262529", "0.5202815", "0.518993", "0.51870537", "0.51478577", "0.5112242", "0.50845855", "0.50789475", "0.5076937", "0.5075569", "0.5075425", "0.50587046", "0.5044964", "0.50403994", "0.5038787", "0.5028384", "0.49987024", "0.4992654", "0.49682638", "0.49648222", "0.49563205", "0.49500754", "0.49499878", "0.4946004" ]
0.6857306
0
Test for MesoscopeFOV.get_provenance method.
def test_get_provenance(self):
    filename = 'mpciMeanImage.mlapdv_estimate.npy'
    provenance = MesoscopeFOV.get_provenance(filename)
    self.assertEqual('ESTIMATE', provenance.name)
    filename = 'mpciROIs.brainLocation_ccf_2017.npy'
    provenance = MesoscopeFOV.get_provenance(filename)
    self.assertEqual('HISTOLOGY', provenance.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_provenance_extras():\n target = DummyTarget()\n provenance = target.provenance()\n assert \"qcsubmit\" in provenance\n assert \"openforcefield\" in provenance\n assert \"bespokefit\" in provenance\n assert \"openforcefield\" in provenance\n assert \"openforcefields\" in provenance\n assert provenance[\"target\"] == target.name\n\n # now add qcsubmit and call again\n target._extra_dependencies.append(\"openeye\")\n provenance = target.provenance()\n assert \"openeye\" in provenance", "def load_provenance(self):\n\n try:\n entry = self._get_nearest_entry_with_artifact()\n if entry is None:\n return None\n return entry.provenance\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)", "def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)", "def testPrefetchProvenance(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath)\n pD = provU.fetch()\n logger.debug(\"pD keys %r\", list(pD.keys()))\n self.assertGreaterEqual(len(pD.keys()), 1)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_07_provenance_is_empty(self):\n outfiles = reporting.provenance_reports(\"2015-01-01T00:00:00Z\", \"2016-01-01T00:00:00Z\", TMP_DIR)\n assert outfiles is None, outfiles\n\n # Try as background job\n job = reporting.ReportingBackgroundTask.prepare(\"system\", outdir=TMP_DIR, from_date=DEFAULT_TIMESTAMP_VAL,\n to_date=dates.now_str())\n reporting.ReportingBackgroundTask.submit(job)\n time.sleep(1)\n job = models.BackgroundJob.pull(job.id)\n\n assert 'No provenance records found' in json.dumps(job.audit), job.audit", "def test_properties_evolution_get(self):\n pass", "def provenance(self, provenance):\n\n self._provenance = provenance", "def _checkProvenace(item, path):\n if item is None:\n return item\n\n item_path_normalized = os.path.abspath(os.path.expandvars(os.path.expanduser(item)))\n if os.path.isfile(item_path_normalized):\n # Add full path\n item = item_path_normalized\n if item not in df.index: # If it is a file and it is not being uploaded\n try:\n bundle = syn._getFromFile(item)\n return bundle\n except SynapseFileNotFoundError:\n SynapseProvenanceError((\"The provenance record for file: %s is incorrect.\\n\"\n \"Specifically %s is not being uploaded and is not in Synapse.\"\n % (path, item)))\n\n elif not utils.is_url(item) and (utils.is_synapse_id(item) is None):\n raise SynapseProvenanceError((\"The provenance record for file: %s is incorrect.\\n\"\n \"Specifically %s, is neither a valid URL or synapseId.\") % (path, item))\n return item", "def testFetch(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n self.assertTrue(ok)\n #\n fD = provU.fetch()\n self.assertTrue(self.__provKeyName in fD)\n self.assertDictEqual(pD, fD)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def _get_file_entity_provenance_dict(syn, entity):\n try:\n prov = syn.getProvenance(entity)\n return {'used': ';'.join(prov._getUsedStringList()),\n 'executed': ';'.join(prov._getExecutedStringList()),\n 'activityName': prov.get('name', ''),\n 'activityDescription': prov.get('description', '')}\n except SynapseHTTPError as e:\n if e.response.status_code == 404:\n return {} # No provenance present return empty dict\n else:\n raise # unexpected error so we re-raise the exception", "def 
provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('vinwah', 'vinwah')\n\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/vinwah#') # The scripts are in <folder>#<filename> format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/vinwah#') # The data sets are in <user>#<collection> format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n\n this_script = doc.agent('alg:getBusinessesByCategory', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n\n resource = doc.entity('dat:businesses', {'prov:label': 'Businesses in Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n\n get = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n\n doc.wasAssociatedWith(get, this_script)\n\n doc.usage(get, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})\n\n enti = doc.entity('dat:businessesByCategory', {prov.model.PROV_LABEL: 'Location of Businesses in Boston by top 10 categories', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(enti, this_script)\n doc.wasGeneratedBy(enti, get, endTime)\n doc.wasDerivedFrom(enti, resource, get, get, get)\n\n repo.logout()\n\n return doc", "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('jdbrawn_jliang24_slarbi_tpotye', 'jdbrawn_jliang24_slarbi_tpotye')\n\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n doc.add_namespace('bdp', 'https://data.boston.gov/api/action/datastore_search?resource_id=')\n doc.add_namespace('591', 'http://datamechanics.io/data/jdbrawn_jliang24_slarbi_tpotye/')\n doc.add_namespace('bdp1', 'https://data.cityofboston.gov/resource/')\n\n this_script = doc.agent('alg:jdbrawn_jliang24_slarbi_tpotye#safetyCorrelation', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n\n resource_policeAnalysis = doc.entity('dat:jdbrawn_jliang24_slarbi_tpotye#policeAnalysis', {'prov:label': 'Police Station and Schools', prov.model.PROV_TYPE: 'ont:DataSet'})\n\n\n get_safetyCorrelation = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n\n doc.wasAssociatedWith(get_safetyCorrelation, this_script)\n\n doc.usage(get_safetyCorrelation, resource_policeAnalysis, startTime, None, {prov.model.PROV_TYPE: 'ont:Computation'})\n\n correlation = doc.entity('dat:jdbrawn_jliang24_slarbi_tpotye#safetyCorrelation', {prov.model.PROV_LABEL: 'Safety Correlation', prov.model.PROV_TYPE: 'ont:DataSet'})\n \n doc.wasAttributedTo(correlation, this_script)\n doc.wasGeneratedBy(correlation, get_safetyCorrelation, endTime)\n doc.wasDerivedFrom(correlation, resource_policeAnalysis, get_safetyCorrelation, get_safetyCorrelation, get_safetyCorrelation)\n\n repo.logout()\n\n return doc", "def is_proved(self):\n return 
len(self.proofs) > 0", "def get_hotspot_provenance(self, suptitle, scenario, ancestor_files):\n caption = (f\"{suptitle}. Calculated for seasons \"\n f\"{self.seasons[0].upper()}, \"\n f\"{self.seasons[1].upper()} and {self.seasons[2].upper()} \"\n f\"in the future periods {self.cfg['future_periods'][0]} \"\n f\"and {self.cfg['future_periods'][1]} \"\n f\"for CMIP5 {self.formatter(f'cmip5-{scenario}')} \"\n f\"and CMIP6 {self.formatter(f'cmip6-{scenario}')}\")\n\n record = {\n 'caption': caption,\n 'statistics': ['anomaly', 'diff'],\n 'domains': ['reg'],\n 'plot_types': ['map'],\n 'authors': [\n 'cos_josep',\n ],\n 'references': [\n 'cos22esd',\n ],\n 'ancestors': ancestor_files,\n }\n return record", "def testStore(self):\n try:\n provU = ProvenanceProvider(self.__cfgOb, self.__cachePath, useCache=False)\n pD = {self.__provKeyName: self.__provInfoL}\n ok = provU.store(pD)\n #\n self.assertTrue(ok)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_passage_retrieval(self):\n\t\t\n\t\tself.assertTrue(helpers.get_verse_contents('Genesis', '1', '1', 'ESV') != False)", "def readAssetProvenanceObjects(context):\n assetProvenanceObjects = []\n provenance = GenericMetadata.readProvenanceEntries(context)\n try:\n assets = provenance['entities'].split(GenericMetadata.VALUE_DELIM)\n for asset in assets:\n assetProvenanceObjects.append(AssetProvenance.readFromMetadata(context, asset))\n except KeyError:\n pass\n return assetProvenanceObjects", "def addProvenance(self, provenance_on=True):\n self.kwargs['additionalInfo'] = provenance_on", "def test_get_proposal_demand(self):\n pass", "def check_proof(self):\n for gene in self.population:\n if gene.is_proof:\n print(gene.chromosome)\n for state in gene.coq_states:\n print(state)\n self.proofs.append(Gene(chromosome=gene.valid_tactics))", "def test_variablepresentations_get(self):\n pass", "def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n\n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('jyaang_robinliu106', 'jyaang_robinliu106')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/') # The scripts are in <folder>#<filename> format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in <user>#<collection> format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n\n this_script = doc.agent('alg:jyaang_robinliu106#getNeighborhoodScores', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n hospital_resource = doc.entity('bdp:46f7-2snz', {'prov:label': 'Hospital Coordinates', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'json'})\n school_resource = doc.entity('bdp:e29s-ympv', {'prov:label': 'School Coordinates', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension':'json'})\n crime_resource = doc.entity('bdp:fqn4-4qap', {'prov:label': 'Crime Coordinates', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension':'json'})\n neighborhoodScores = doc.entity('dat:jyaang_robinliu106#neighborhood_scores', {prov.model.PROV_LABEL: 'Scores of each Boston neighborhood', prov.model.PROV_TYPE:'ont:DataSet'})\n get_NeighborhoodScores = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime, 
{prov.model.PROV_TYPE:'ont:Computation'})\n doc.wasAssociatedWith(get_NeighborhoodScores, this_script)\n doc.used(get_NeighborhoodScores, neighborhoodScores, startTime)\n doc.wasAttributedTo(neighborhoodScores, this_script)\n doc.wasGeneratedBy(neighborhoodScores, get_NeighborhoodScores, endTime)\n\n repo.record(doc.serialize()) # Record the provenance document\n repo.logout()\n return doc", "def _get_prochirality(self):\n for atom in self.invarioms:\n atom.get_prochirality()\n atom.invariom.get_prochirality()", "def test_getPassage_full_metadata(self):\n passage = self.resolver.getTextualNode(\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\", metadata=True)\n\n self.assertIsInstance(\n passage, Passage,\n \"GetPassage should always return passages objects\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"title\"), \"eng\"]), \"Epigrammata\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"groupname\"),\"eng\"]), \"Martial\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"label\"), \"eng\"]), \"Epigrams\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n str(passage.metadata[NAMESPACES.CTS.term(\"description\"), \"eng\"]),\n \"M. Valerii Martialis Epigrammaton libri / recognovit W. Heraeus\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n passage.citation.name, \"book\",\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n self.assertEqual(\n len(passage.citation), 3,\n \"Local Inventory Files should be parsed and aggregated correctly\"\n )\n\n children = list(passage.getReffs(level=3))\n # We check the passage is able to perform further requests and is well instantiated\n self.assertEqual(\n children[0], '1.pr.1',\n \"Resource should be string identifiers\"\n )\n\n self.assertIn(\n \"Hic est quem legis ille, quem requiris,\", passage.export(output=Mimetypes.PLAINTEXT),\n \"Export PrototypeText should work correctly\"\n )\n\n self.assertEqual(\n passage.export(\n output=Mimetypes.PYTHON.ETREE\n ).xpath(\n \".//tei:div[@n='1']/tei:div[@n='1']/tei:l[@n='1']/text()\", namespaces=NS, magic_string=False\n ),\n [\"Hic est quem legis ille, quem requiris, \"],\n \"Export to Etree should give an Etree or Etree like object\"\n )", "def test_properties_get(self):\n pass", "def get_provenance_tails(self) -> List[\"Dataset\"]:\n raise NotImplementedError", "def test_api_promotions_get(self):\n default_api = DefaultApi(api_client=self.api_client)\n params = dlrnapi_client.Promotion()\n path, method = default_api.api_promotions_get(params)\n self.assertEqual(path, '/api/promotions')\n self.assertEqual(method, 'GET')", "def propositions(civic_eid2997_proposition):\n return [civic_eid2997_proposition]", "def _check_accessor_for_version_problems(self):\n\n old_prov = self._cache_accessor.load_provenance()\n if old_prov is None:\n return\n\n new_prov = self._cache_accessor.provenance\n if old_prov.exactly_matches(new_prov):\n return\n\n if old_prov.nominally_matches(new_prov):\n # If we have a nominal match but not an exact match, that means the\n # user must changed a function's bytecode but not its version. To report\n # this, we first need to figure out which function changed. It could be\n # the one for this task, or it could be any immediate non-persisted\n # ancestor of this one. 
Fortunately, each provenance contains links to each of\n # its dependency digests, and a digest of non-persisted value contains that\n # value's provenance, so we can recursively search through our ancestor\n # provenances until we find which one caused the mismatch.\n def locate_mismatched_provenances_and_raise(old_prov, new_prov):\n assert old_prov.nominally_matches(new_prov)\n # If the bytecode doesn't match, we found the problematic pair.\n if old_prov.bytecode_hash != new_prov.bytecode_hash:\n message = f\"\"\"\n Found a cached artifact with the same descriptor\n ({self._cache_accessor.provenance.descriptor!r})\n and version (major={old_prov.code_version_major!r},\n minor={old_prov.code_version_minor!r}),\n but created by different code.\n It appears that the code function that outputs\n {new_prov.descriptor}\n was changed (old bytecode hash {old_prov.bytecode_hash!r};\n new bytecode hash {new_prov.bytecode_hash!r})\n but the function's version number was not.\n Change @version(major=) to indicate that your\n function's behavior has changed, or @version(minor=)\n to indicate that it has *not* changed.\n \"\"\"\n raise CodeVersioningError(oneline(message), new_prov.descriptor)\n # If the provenances nominally match, they must have essentially the\n # same structure.\n assert len(old_prov.dep_digests) == len(new_prov.dep_digests)\n # Since these provenances match nominally and have matching bytcode,\n # the mismatch must be in one of their dependencies. We'll iterate\n # through them to figure out which one.\n for old_dep_digest, new_dep_digest in zip(\n old_prov.dep_digests, new_prov.dep_digests\n ):\n # If this digest pair matches, it must not be where the problem is.\n if old_dep_digest.exact_hash == new_dep_digest.exact_hash:\n continue\n\n # Not all digests have provenances, but these should. Digests of\n # non-persisted values have provenances, and if these were persisted\n # then their exact hashes would be the same as their nominal hashes,\n # so they would have matched above.\n old_dep_prov = old_dep_digest.provenance\n new_dep_prov = new_dep_digest.provenance\n locate_mismatched_provenances_and_raise(old_dep_prov, new_dep_prov)\n assert False\n\n try:\n locate_mismatched_provenances_and_raise(old_prov, new_prov)\n except AssertionError as e:\n message = f\"\"\"\n Enncountered an internal error while performing an assisted versioning\n check. This should be impossible and is probably a bug in Bionic; please\n report this stace track to the developers. 
However, it's also likely\n that you need to update the ``@version`` annotation on the function\n that outputs {self._cache_accessor.provenance.descriptor}.\n If that doesn't fix the warning, you can try filtering the warning with\n ``warnings.filterwarnings``; deleting the disk cache; or disabling\n assisted versioning.\n \"\"\"\n logger.warn(oneline(message), exc_info=e)\n\n self._cache_accessor.update_provenance()", "def setProvenance(args, syn):\n \n activity = Activity(name=args.name, description=args.description)\n if args.used:\n for item in args.used:\n activity.used(item)\n if args.executed:\n for item in args.executed:\n activity.used(item, wasExecuted=True)\n activity = syn.setProvenance(args.id, activity)\n\n # Display the activity record, if -o or -output specified\n if args.output:\n if args.output=='STDOUT':\n sys.stdout.write(json.dumps(activity))\n sys.stdout.write('\\n')\n else:\n with open(args.output, 'w') as f:\n f.write(json.dumps(activity))\n f.write('\\n')\n else:\n print 'Set provenance record %s on entity %s\\n' % (str(activity['id']), str(args.id))" ]
[ "0.62085104", "0.6152855", "0.6148594", "0.5985703", "0.596019", "0.57090783", "0.5705128", "0.55922544", "0.5544663", "0.5482702", "0.5366024", "0.53544986", "0.5342119", "0.52535665", "0.52333575", "0.5217271", "0.5199212", "0.5185142", "0.5115572", "0.5111425", "0.5097022", "0.50024205", "0.49793264", "0.496036", "0.4958025", "0.4936191", "0.493233", "0.48924503", "0.48731345", "0.4855662" ]
0.84042156
0
Test for find_triangle function.
def test_find_triangle(self):
    points = np.array([[2.435, -3.37], [2.435, -1.82], [2.635, -2.], [2.535, -1.7]])
    connectivity_list = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]], dtype=np.intp)
    point = np.array([2.6, -1.9])
    self.assertEqual(1, find_triangle(point, points, connectivity_list))
    point = np.array([3., 1.])  # outside of defined vertices
    self.assertEqual(-1, find_triangle(point, points, connectivity_list))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_inside_triangle(self):\n\n # defining triangle vertices\n v1x, v1y = 0, 0\n v2x, v2y = 1, 1\n v3x, v3y = 1, 0\n\n # test vertices are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v1x, v1y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v2x, v2y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v3x, v3y))\n\n # check line segments are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1, 0.5))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.5))\n\n # check an interior point\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.1))\n\n # check an exterior point\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, -0.5, -0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, -0.01))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1.01, 0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.49999, 0.5001))", "def test_classify_triangle(self):\n self.assertTrue(classify_triangle(5,4,3),\"5, 4, 3 can make up Right triangle.\")\n self.assertTrue(classify_triangle(1,1,1),\"1, 1, 1 can up Equilateral triangle.\")\n self.assertTrue(classify_triangle(1,2,3),\"1, 2, 3 Can Not make up the triangle.\")\n self.assertTrue(classify_triangle(6,6,10),\"6, 6, 10 can make up Isosceles triangle.\")\n self.assertTrue(classify_triangle(4,2,3),\"4, 2, 3 can make up Scalene triangle.\")", "def test_not_a_triangle(self):\n self.assertEqual(classify_triangle(4, 7, 21), 'NotATriangle', '4,7,21 is not a triangle')\n self.assertEqual(classify_triangle(5, 1, 1), 'NotATriangle', '5, 1, 1 is not a triangle')\n self.assertEqual(classify_triangle(2, 6, 2), 'NotATriangle', '2,6,2 is not a triangle')\n self.assertEqual(classify_triangle(1, 1, 5), 'NotATriangle', '1,1,5 is not a triangle')", "def test_classify_triangle(self):\r\n\r\n self.assertEqual(classify_triangle(3, 3, 3), ('Equilateral'))\r\n self.assertEqual(classify_triangle(2, 4, 2), ('Isosceles'))\r\n self.assertEqual(classify_triangle(3, 4, 5), ('Scalene and Right triangle'))", "def test_isosceles_triangle(self):\n self.assertEqual(classify_triangle(2, 2, 3), 'Isosceles', '2,2,4 is Isosceles')\n self.assertEqual(classify_triangle(3, 5, 3), 'Isosceles', '3,5,3 is Isosceles')\n self.assertEqual(classify_triangle(4, 6, 6), 'Isosceles', '4,6,6 is Isosceles')", "def test_touch(self):\n pt = (.5, .5, 0)\n pts = ((0, 0, 7), (1, 0, 8), (1, 1, 9))\n tris = (2, 0, 1)\n\n tri_search = grid.geometry.TriSearch(pts, tris)\n\n self.assertEqual(0, tri_search.triangle_containing_point(pt))", "def test_scalene_triangle(self):\n self.assertEqual(classify_triangle(4, 5, 6), 'Scalene', '4,5,6 is Scalene')\n self.assertEqual(classify_triangle(6, 5, 4), 'Scalene', '6,5,4 is Scalene')\n self.assertEqual(classify_triangle(5, 4, 6), 'Scalene', '5,4,6 is Scalene')\n self.assertEqual(classify_triangle(10, 11, 12), 'Scalene', '10,11,12 is Scalene')", "def test_invalid_zero():\n assert 'invalid' == classify_triangle(0,0,0)", "def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,msg", "def findTriangles(p):\n triangleCount = 0\n for a in range(3, p//3 + 1):\n for b in range(a+1, p//2):\n c = p - 
(a+b)\n if (a**2 + b**2) == c**2:\n triangleCount += 1\n return triangleCount", "def test_right_triangle(self):\n self.assertEqual(classify_triangle(3, 4, 5), 'Right Scalene')\n self.assertEqual(classify_triangle(10, 6, 8), 'Right Scalene')\n self.assertEqual(classify_triangle(24, 25, 7), 'Right Scalene')\n\n self.assertEqual(classify_triangle(5, 5, 7.07106781187), 'Right Isosceles')\n self.assertEqual(classify_triangle(8, 11.313708499, 8), 'Right Isosceles')\n self.assertEqual(classify_triangle(14.1421356237, 10, 10), 'Right Isosceles')", "def is_triangle(x):\n solution = solve_quad(1, 1, -2*x)\n return max(solution) % 1 == 0", "def test_isosceles():\n assert 'isosceles' == classify_triangle(2,2,3)", "def test_get_triangle_ltz_all_int(self):\n result = get_triangle_type(-1, -1, -1)\n self.assertEqual(result, 'invalid')", "def test_right():\n assert 'right' == classify_triangle(3,4,5)", "def test_equilateral_triangle(self):\n self.assertEqual(classify_triangle(1, 1, 1), 'Equilateral', '1,1,1 is equilateral')\n self.assertEqual(classify_triangle(5, 5, 5), 'Equilateral', '5,5,5 is equilateral')\n self.assertEqual(classify_triangle(10, 10, 10), 'Equilateral', '10,10,10 is equilateral')", "def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')", "def test_invalid_isosceles():\n assert 'invalid' == classify_triangle(1,1,3)", "def is_triangle(a, b, c):\n a, b, c = sorted([a, b, c])\n return True if a > abs(b - c) and a < (b + c) else False", "def test_case_04_legal_triangle(self):\n self.__assert_not_equal_test_case([(4, 4, 8), (4, 5, 8)], 'NotATriangle')", "def test_invalid_scalene():\n assert 'invalid' == classify_triangle(1,2,3)", "def test_not_a_triangle(self):\r\n self.assertEqual(triangle_classification(1, 1, 2), 'This is not a triangle')\r\n self.assertEqual(triangle_classification(7, 3, 2), 'This is not a triangle')\r\n self.assertNotEqual(triangle_classification(3, 4, 5), 'This is not a triangle')\r\n self.assertEqual(triangle_classification(-1, -3, -2), 'A triangle cannot have any negative or 0 side')\r\n self.assertEqual(triangle_classification(0, 0, 0), 'A triangle cannot have any negative or 0 side')\r\n self.assertIn('This is not a triangle', triangle_classification(1, 2, 3))", "def test_triangle(self):\n result = shape_area.triangle_area(10,5)\n self.assertEqual(result,25)", "def test_right_order():\n assert 'right' == classify_triangle(5,3,4)", "def test_sms_case_1(self):\n pt = (-31.459823375717541, 29.927133417260336, 0)\n\n pts = ((-20.150000000000002, 46.579999999999998, 7),\n (-41.100000000000001, 30.370000000000001, 8),\n (-19.550000000000001, 29.379999999999999, 9))\n tris = (2, 0, 1)\n\n tri_search = grid.geometry.TriSearch(pts, tris)\n\n self.assertEqual(0, tri_search.triangle_containing_point(pt))", "def classify_triangle(x,y,z):\n\n if x > 0 and y > 0 and z > 0 : #if any of length is zero then false\n\n if x == y and x == z and y==z :\n return 'equilateral'\n \n elif x == y and x != z and y != z :\n return 'isoscels'\n \n elif x != y and x != z and y != z and (x*x) + (y*y) != (z*z):\n return 'scalene'\n \n elif (x*x) + (y*y) == (z*z) :\n return 'right'\n\n else:\n return 'false'", "def test_classify_triangle_negative(self):\r\n self.assertEqual(classify_triangle(4, 6, -2), ('Scalene'))", "def pointInTriangle(p, t1, t2, t3):\n\treturn (clockwise(p, t1, t2) and clockwise(p, t2, t3) and clockwise(p, t3, t1)) or (not clockwise(p, t1, t2) and not clockwise(p, t2, t3) and not clockwise(p, t3, t1))", "def 
test_get_triangle_input_all_int(self):\n result = get_triangle_type('a', \"DEADBEEF\", 'Z')\n self.assertEqual(result, 'invalid')", "def test_get_triangle_equil_all_int(self):\n result = get_triangle_type(1, 1, 1)\n self.assertEqual(result, 'equilateral')" ]
[ "0.71465766", "0.7094666", "0.70703703", "0.69719374", "0.69642067", "0.6925323", "0.6882541", "0.68354255", "0.6797639", "0.67817736", "0.67645806", "0.6760101", "0.67553604", "0.67485696", "0.67330456", "0.6729973", "0.6727655", "0.67141724", "0.66781765", "0.6619821", "0.6618553", "0.6613971", "0.6608082", "0.6564226", "0.6494836", "0.6479837", "0.6463837", "0.6448799", "0.64385074", "0.6406263" ]
0.82844937
0
Test for surface_normal function.
def test_surface_normal(self):
    vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])
    expected = np.array([0, 0, 1])
    np.testing.assert_almost_equal(surface_normal(vertices), expected)
    # Test against multiple triangles
    vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]
    expected = np.array([[0, 0, 1], [0, 0, -1]])
    np.testing.assert_almost_equal(surface_normal(vertices), expected)
    # Some real data
    vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])
    expected = np.array([0.33424239, 0.11141413, 0.93587869])
    np.testing.assert_almost_equal(surface_normal(vertices), expected)
    # Test input validation
    self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_normal_always_up(self):\n z_of_normals = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n z_of_normals += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[5])\n np.testing.assert_array_less(np.zeros_like(z_of_normals), z_of_normals)", "def surface_norm(self, pt):\n\n return self.normal.normalize()", "def test_2_normal(self):\n print(\"test 2: normal distributions\")\n\n mean = self.means[0]\n dispersion = self.dispersions[0]\n\n for i, x in enumerate(self.X):\n print(i+1, normal(x, mean, dispersion), sep=' : ')", "def test_normal_unit_length(self):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n normals = np.array(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[3:6])\n lengths = np.sum(normals * normals, axis=0)\n np.testing.assert_almost_equal(np.ones_like(lengths), lengths)", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.normal((2,2), -1, 2))\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def get_normal_fluctuation(hover,target,normal,vec):\n\tvector = hover - target\n\tvector = vector - vec*(vector>(vec/2.)) + vec*(vector<(-1*vec/2.))\n\tprojected = planeproject(vector,normal)\n\t#---get the sign of the projection\n\tplane_point = vector+projected\n\tsign = 1.0-2.0*(np.arccos(np.dot(vecnorm(normal),vecnorm(vector)))>np.pi/2.)\n\treturn sign*np.linalg.norm(plane_point)", "def SetNormal(self, *args):\n return _itkSurfaceSpatialObjectPointPython.itkSurfaceSpatialObjectPoint3_SetNormal(self, *args)", "def test_normals(self, faces, point):\n space = self.Space(faces=faces)\n cube_normals = gs.array(\n [\n [0.0, 0.0, 2.0],\n [0.0, 0.0, 2.0],\n [0.0, 2.0, 0.0],\n [0.0, 2.0, 0.0],\n [2.0, 0.0, 0.0],\n [2.0, 0.0, 0.0],\n [0.0, -2.0, 0.0],\n [0.0, -2.0, 0.0],\n [-2.0, 0.0, 0.0],\n [-2.0, 0.0, 0.0],\n [0.0, 0.0, -2.0],\n [0.0, 0.0, -2.0],\n ]\n )\n expected = cube_normals\n\n result = space.normals(point)\n are_close = [\n (gs.allclose(res, exp) or gs.allclose(res, -exp))\n for res, exp in zip(result, expected)\n ]\n\n assert gs.all(are_close)\n\n point = gs.array([point, point])\n result = space.normals(point)\n are_close_0 = [\n (gs.allclose(res, exp) or gs.allclose(res, -exp))\n for res, exp in zip(result[0], expected)\n ]\n are_close_1 = [\n (gs.allclose(res, exp) or gs.allclose(res, -exp))\n for res, exp in zip(result[1], expected)\n ]\n assert gs.all(gs.array([are_close_0, are_close_1]))", "def check_normality(self,alpha = 0.05):\n\n stat1, p = shapiro(self.x)\n \n if self.y is not None:\n stat2, p2 = shapiro(self.y)\n \n if p < alpha:\n if self.y is not None:\n if p2 < alpha:\n self._verbose('x and y do not look Gaussian (reject H0)')\n return False\n else:\n self._verbose('x does not look Gaussian, but y looks Gaussian (fail to reject H0)')\n return True\n else:\n self._verbose('Sample does not look Gaussian (reject H0)')\n return False\n\n else:\n if self.y is not None:\n if p2 < alpha:\n self._verbose('x looks Gaussian, but y does not look Gaussian (fail to reject H0)')\n return 
False\n else:\n self._verbose('x and y look Gaussian (fail to reject H0)')\n return True\n else:\n self._verbose('Sample looks Gaussian (fail to reject H0)')\n return True", "def is_normal(r, level=0.01):\n if isinstance(r, pd.DataFrame):\n return r.aggregate(is_normal)\n else:\n statistic, p_value = scipy.stats.jarque_bera(r)\n return p_value > level", "def SetNormal(self, *args):\n return _itkSurfaceSpatialObjectPointPython.itkSurfaceSpatialObjectPoint2_SetNormal(self, *args)", "def GetNormal(self):\n ...", "def check_normality(serie: List[float], alpha: int = 0.05) -> bool:\n stat, p = stats.shapiro(serie)\n print('Statistics=%.3f, p=%.3f' % (stat, p))\n\n if p > alpha:\n return True\n else:\n return False", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] = 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def normal(self, uv):\n res = GeomLProp_SLProps(self.surface(), uv[0], uv[1], 1, 1e-9)\n if not res.IsNormalDefined():\n return (0, 0, 0)\n normal = geom_utils.gp_to_numpy(res.Normal())\n if self.reversed():\n normal = -normal\n return normal", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def _sampling_normal(self, args):\n z_mean, z_log_var = args\n return sampling_normal(z_mean, z_log_var, (None, self.latent_dim))", "def normal(self, point):\n point = self._center - np.array(point)\n # if abs(point.dot(point) - self._radius**2) > 1e-15:\n # raise RayTraceError(\n # 'Cannot compute normal. 
Point is too far from surface ({}).'.format(\n # (abs(point.dot(point) - self._radius**2))))\n return normalize(point / self._radius)", "def Normals(self, show_plot=False):\n\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetBoundaryEdges()\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n normals = self.FaceNormals()\n elif self.element_type == \"tri\" or self.element_type == \"quad\" or self.element_type == \"line\":\n if self.points.shape[1] == 3:\n normals = self.FaceNormals()\n else:\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n edges = self.edges\n elif self.element_type == \"line\":\n edges = self.elements\n\n edge_coords = self.points[edges[:,:2],:]\n p1p0 = edge_coords[:,1,:] - edge_coords[:,0,:]\n\n normals = np.zeros_like(p1p0)\n normals[:,0] = -p1p0[:,1]\n normals[:,1] = p1p0[:,0]\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetElementsWithBoundaryEdges()\n meds = self.Medians()\n edge_element_meds = meds[self.boundary_edge_to_element[:,0],:]\n p1pm = edge_coords[:,1,:] - edge_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n\n if show_plot:\n\n if ndim == 2:\n mid_edge_coords = 0.5*(edge_coords[:,1,:] + edge_coords[:,0,:])\n\n import matplotlib.pyplot as plt\n figure = plt.figure()\n\n self.SimplePlot(figure=figure, show_plot=False)\n\n q = plt.quiver(mid_edge_coords[:,0], mid_edge_coords[:,1],\n normals[:,0], normals[:,1],\n color='Teal', headlength=5, width=0.004)\n\n plt.axis('equal')\n plt.axis('off')\n plt.tight_layout()\n plt.show()\n\n\n elif ndim == 3:\n faces = self.faces\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n mid_face_coords = np.sum(self.points[faces,:3],axis=1)/faces.shape[1]\n\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))\n\n self.SimplePlot(figure=figure, show_plot=False)\n\n mlab.quiver3d(mid_face_coords[:,0], mid_face_coords[:,1], mid_face_coords[:,2],\n normals[:,0], normals[:,1], normals[:,2],\n color=(0.,128./255,128./255),line_width=5)\n mlab.show()\n\n return normals", "def normal(self, position):\n return self._normal", "def random_normal():\r\n return inverse_normal_cdf(random.random())", "def testNorm(self):\n assert(Vector(0, 3, 4).norm() == 5)\n assert(Vector(3, 4).norm() == 5)\n assert Vector(0, 3, 0, 0, 4, 0, size=10).norm() == 5", "def test_normal(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.normal((2,2), -1, 2))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.normal(-1, 2, size=(2,2))\r\n numpy_val1 = rng.normal(-1, 2, size=(2,2))\r\n\r\n assert 
numpy.allclose(fn_val0, numpy_val0)\r\n assert numpy.allclose(fn_val1, numpy_val1)", "def test_normalization(self):\n u = np.array([np.array([0.7, 1.2]), np.array([0.5, 1.6])])\n with tf.Session() as sess:\n n = sess.run(AbstractModel.l2_normalization_layer(u, axis=1))\n magnitude = np.linalg.norm(n, axis=1)\n np.testing.assert_allclose(magnitude, np.array([1.0, 1.0]))", "def random_normal():\n return inverse_normal_cdf(random.random())", "def normalisable(self):\n\n return np.abs(np.nansum(self.data)) > 0", "def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. \n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz" ]
[ "0.7123382", "0.6521624", "0.6423122", "0.6392963", "0.6290798", "0.61473745", "0.61183316", "0.61182386", "0.60976636", "0.6091063", "0.60830945", "0.6079103", "0.6078365", "0.60698634", "0.60317665", "0.5993436", "0.5993436", "0.5993436", "0.5993436", "0.5993436", "0.5944836", "0.58578366", "0.58555275", "0.58438426", "0.5837974", "0.58147204", "0.5799891", "0.5751358", "0.57347226", "0.57340807" ]
0.8323997
0
Test for _nearest_neighbour_1d function.
def test_nearest_neighbour_1d(self):
    x = np.array([2., 1., 4., 5., 3.])
    x_new = np.array([-3, 0, 1.2, 3, 3, 2.5, 4.7, 6])
    val, ind = _nearest_neighbour_1d(x, x_new)
    np.testing.assert_array_equal(val, [1., 1., 1., 3., 3., 2., 5., 5.])
    np.testing.assert_array_equal(ind, [1, 1, 1, 4, 4, 0, 3, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_nearest_neighbour_regular_1d():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20:22, 10:12] = 7\n\n # the four nearest values for the second point\n data[17:19, 13:15] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lat, lon)\n data2 = np.zeros((10, 50, 100))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with only one neighbour or only one target point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data)\n np.testing.assert_array_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 13.2, 17.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(8, 18, 1).reshape(10, 1))", "def test_test_nearest_neighbour_dmean():\n # test with regular grid and 1d coords\n grid_lon = np.arange(100)\n grid_lat = np.arange(50)\n data = np.zeros((50, 100))\n\n # the four nearest values for the first point\n data[20, 10] = 7\n\n # the four nearest values for the second point\n data[17, 13] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10, 13), (20, 17), npoints=2, method=\"d-mean\")(data)\n np.testing.assert_array_almost_equal(res, [5.6, 6.4])", "def test_nearest_neighbour_unstructured():\n # create coordinates\n grid_lon = np.arange(100)\n grid_lat = np.ones(100)\n data = np.zeros(100)\n\n # the nearest 3 points\n data[10:13] = 7\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (11.2, 2.2), (11.2, 13.2), npoints=3, src_grid=\"unstructured\")(data)\n np.testing.assert_array_almost_equal(res, [7, 0])\n\n # same test, but with 2d-data (e.g., level, ncell)\n data2 = np.zeros((10, 100))\n for i in range(10):\n data2[i, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (11.2, 2.2), (11.2, 13.2), npoints=3, src_grid=\"unstructured\")(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(0, 10, 1)]).transpose())\n\n # only one point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=3, src_grid=\"unstructured\")(data)\n np.testing.assert_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=3, src_grid=\"unstructured\")(data2)\n np.testing.assert_array_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))\n\n # same test with one one neighbour point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=1, src_grid=\"unstructured\")(data)\n np.testing.assert_almost_equal(res, 7)\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 11.2, 13.2, npoints=1, src_grid=\"unstructured\")(data2)\n np.testing.assert_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))", "def 
test_nearest_neighbour_regular_2d():\n # test with regular grid and 2d coords\n grid_lon, grid_lat = np.meshgrid(np.arange(100), np.arange(50), indexing=\"ij\")\n data = np.zeros((100, 50))\n\n # the four nearest values for the first point\n data[10:12, 20:22] = 7\n\n # the four nearest values for the second point\n data[13:15, 17:19] = 8\n\n # the actual test\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data)\n np.testing.assert_array_almost_equal(res, [7, 8])\n\n # same test, but with 3d-data (e.g., level, lon, lat)\n data2 = np.zeros((10, 100, 50))\n for i in range(10):\n data2[i, :, :] = data + i\n\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=4)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())\n\n # same test with one neighbour point\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, 10.2, 20.2, npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.arange(7, 17, 1).reshape(10, 1))\n res = enstools.interpolation.nearest_neighbour(grid_lon, grid_lat, (10.2, 13.2), (20.2, 17.2), npoints=1)(data2)\n np.testing.assert_array_almost_equal(res, np.asarray([np.arange(7, 17, 1), np.arange(8, 18, 1)]).transpose())", "def test_nearest_location_adjacent():\n locations = [(1, 3), (3, 5)]\n\n assert nearest_location(locations, 2) == 0\n assert nearest_location(locations, 3) == 1", "def nearest_neighbor(data):\n features = set([i for i, x in enumerate(data[0][1])])\n return leave_one_out_cross_validation(data, features)", "def nearest(*args) -> core.Nearest:\n X, Y, kws = util.parseargs(*args)\n return core.Nearest(X, Y, **kws)", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def knnForOne(x_training_data, y_training_data, single_x_test_data, n_neighbors):\n nearest_neighbors = {}\n length = len(x_training_data)\n\n for i in range(length):\n X2 = x_training_data[i,:] # get current row of known data\n Y2 = y_training_data[i] # get current label of known data\n distance = getDistance(single_x_test_data, X2) # compare test to known data\n\n if len(nearest_neighbors) < n_neighbors: # reach capacity of nearest neighbors\n nearest_neighbors[distance] = Y2\n else: # kick out largest distance\n\n # Assumes no two distances are exactly the same, or data point will be overwritten in dictionary\n largest_distance = max(nearest_neighbors)\n if distance < largest_distance:\n del nearest_neighbors[largest_distance]\n nearest_neighbors[distance] = Y2\n\n # nearest_neighbors is a dictionary with the n nearest neighbors \n # as values and their distances from single_x_test_data as keys\n \n counts = {}\n for key in nearest_neighbors: # initialize counts dictionary\n counts[nearest_neighbors[key]] = 0\n\n for key in nearest_neighbors: # count labels within the nearest neighbors\n counts[nearest_neighbors[key]] += 1\n\n max_value = max(counts.values()) # find most frequent label within the nearest neighbors\n for key in counts:\n if counts[key] == max_value:\n return key", "def one_nearest_neighbors(X, Y, X_test):\n M, N = X_test.shape\n \n # calculate Eucledian distance between a(m,n) and b(m,n)\n eucl_dist = lambda a, b: np.sqrt(np.sum((a-b)**2, axis=1))\n \n # calculate all distances between test and training points\n dist = np.array([eucl_dist(x_test, X) for x_test in X_test])\n \n # get indexi of smallest 
distances\n nn_idx = np.argmin(dist, axis=1)\n\n # assign to class of nearest neighbor\n pred = Y[nn_idx]\n \n return pred", "def test_nearest_location_odd():\n assert nearest_location([(3, 6), (9, 13)], 7) == 0\n assert nearest_location([(3, 6), (9, 13)], 7, 1) == 1", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def test_nearest_location_even():\n assert nearest_location([(3, 6), (8, 13)], 6, 0) == 0\n assert nearest_location([(3, 6), (8, 13)], 6, 1) == 0\n assert nearest_location([(3, 6), (8, 13)], 7, 0) == 1\n assert nearest_location([(3, 6), (8, 13)], 7, 1) == 1", "def test_nearest_boundary_even():\n assert _nearest_boundary(10, 20, 14, 0) == 0\n assert _nearest_boundary(10, 20, 14, 1) == 0\n assert _nearest_boundary(10, 20, 15, 0) == 1\n assert _nearest_boundary(10, 20, 15, 1) == 1", "def test_nn_point(test_data):\n xp, yp, z = test_data\n\n tri = Delaunay(list(zip(xp, yp)))\n\n sim_gridx = [30]\n sim_gridy = [30]\n\n members, tri_info = find_natural_neighbors(tri,\n list(zip(sim_gridx, sim_gridy)))\n\n val = nn_point(xp, yp, z, [sim_gridx[0], sim_gridy[0]],\n tri, members[0], tri_info)\n\n truth = 1.009\n\n assert_almost_equal(truth, val, 3)", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def test_nearest_location():\n locations = [(10, 20), (30, 40), (50, 60)]\n\n assert nearest_location(locations, 8) == 0\n assert nearest_location(locations, 15) == 0\n assert nearest_location(locations, 22) == 0\n\n assert nearest_location(locations, 28) == 1\n assert nearest_location(locations, 35) == 1\n assert nearest_location(locations, 42) == 1\n\n assert nearest_location(locations, 48) == 2\n assert nearest_location(locations, 55) == 2\n assert nearest_location(locations, 62) == 2", "def nearestNeighbours(xObs, xMod):\n\n\txObs=np.asarray(xObs)\n\txMod=np.asarray(xMod)\n\tkept=np.copy(xMod)\n\tLObs=len(xObs)\n\tLMod=len(xMod)\n\txObs=np.expand_dims(xObs, axis=1)\n\txMod=np.expand_dims(xMod, axis=1)\n\txObs=np.repeat(xObs, LMod, axis=1)\n\txMod=np.repeat(xMod, LObs, axis=1)\n\txMod=xMod.T\n\tdiffs=xObs-xMod\n\t#interesting point: the smallest point (the one you are looking for) will be\n\t#the point just before the first negative value in a row\n\t#this could be used in an alternative method much to your advantage\n\ttemp=np.greater(diffs,0)\n\taltered=temp*diffs + np.invert(temp)*(10**30)\n\tmins=altered.min(1)\t\n\tmins=np.expand_dims(mins, axis=1)\n\tmins=np.repeat(mins, LMod, axis=1)\n\tplaced=np.equal(mins, diffs)*np.repeat(np.expand_dims(np.arange(0,LMod), axis=1), LObs, axis=1).T\n\tplaced1=np.sum(placed, axis=1)\n\tclosest1=kept[placed1]\n\tplaced2=np.add(placed1,1)\n\t#below deals with the fringe case; when there is no model x value greater than\n\t#a specific observation x value \n\ttemp=np.where(placed2 > (len(kept)-1))\n\tplaced2[temp]=placed2[temp]-1\n\tclosest2=kept[placed]\n\t#print(\"-----------------\")\n\t#print(closest1, closest2)\n\treturn placed1, placed2, closest1", "def nearest_neighbor(non_cancer_data, cancer_data):\r\n #status is based on the mean of smallest elements of cancer and non_cancer groups\r\n #sort\r\n non_cancer_data = sorted(non_cancer_data)\r\n cancer_data = sorted(cancer_data)\r\n\r\n #choose nearest neighbors\r\n non_cancer_neighbor = np.mean(non_cancer_data[:2])\r\n cancer_neighbor = np.mean(cancer_data[:2])\r\n\r\n #etermine cell status\r\n if non_cancer_neighbor > cancer_neighbor:\r\n print(\"Negative\")\r\n else:\r\n print(\"Positive\")", "def 
find_nearest_neighbor(src, dst):\n return sp.spatial.KDTree(dst).query(src)", "def test_1d_constant_interpolation_basic(self):\n\n # Define pixel centers along each direction\n x = numpy.array([1.0, 2.0, 4.0])\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n A[i] = linear_function(x[i], 0)\n\n # Then test that interpolated points are always assigned value of\n # closest neighbour\n xis = numpy.linspace(x[0], x[-1], 10)\n points = xis\n\n vals = interpolate1d(x, A, points, mode='constant')\n\n # Find upper neighbours for each interpolation point\n xi = points[:]\n idx = numpy.searchsorted(x, xi, side='left')\n\n # Get the neighbours for each interpolation point\n x0 = x[idx - 1]\n x1 = x[idx]\n\n z0 = A[idx - 1]\n z1 = A[idx]\n\n # Location coefficients\n alpha = (xi - x0) / (x1 - x0)\n\n refs = numpy.zeros(len(vals))\n for i in range(len(refs)):\n if alpha[i] < 0.5:\n refs[i] = z0[i]\n\n if alpha[i] >= 0.5:\n refs[i] = z1[i]\n\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)", "def no_neighbour(x: int, y: int) -> bool:\r\n if not wall_check(x, y-1, False):\r\n if example[x, y-1] == 0:\r\n return False\r\n if not wall_check(x, y+1, False):\r\n if example[x, y+1] == 0:\r\n return False\r\n if not wall_check(x+1, y, False):\r\n if example[x+1, y] == 0:\r\n return False\r\n if not wall_check(x-1, y, False):\r\n if example[x-1, y] == 0:\r\n return False\r\n return True", "def checkNumNeighbors():", "def investigate(self, nearest_neighbors):\n pass", "def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)", "def nearest_neighbour(matrix, start=0):\n path = [start]\n while len(matrix) != len(path):\n matrix[:, start] = numpy.inf\n start = numpy.argmin(matrix[start])\n path.append(start)\n return path", "def neighbor(self, m, k, func):\n data = np.random.random((m, k))\n target = np.random.random(k)\n tree = KDTree(data)\n dist, index = tree.query(target)\n point = tree.data[index]\n spoint, sdist = func(data, target) # func solves the problem\n p1 = self.numTest(point, spoint,\n \"\\n\\t\"+func.__name__+\"() failed: incorrect nearest neighbor\")\n p2 = self.numTest(dist, sdist, \n \"\\n\\t\"+func.__name__+\"() failed: incorrect minimum distance\")\n return p1 + p2", "def 
test_natural_neighbor(test_data, test_grid):\n xp, yp, z = test_data\n xg, yg = test_grid\n\n img = natural_neighbor(xp, yp, z, xg, yg)\n\n with get_test_data('nn_bbox0to100.npz') as fobj:\n truth = np.load(fobj)['img']\n\n assert_array_almost_equal(truth, img)", "def test_k_nearest(self):\n L = range(100)\n L = [(i, i, i, i) for i in L]\n tree = KdTree(L)\n # remove distance, only keep points from the result\n items = lambda items: [x for (d, x) in items] \n assert items(tree.k_nearest((-1, -1), 1)) == [(0, 0, 0, 0)]\n assert items(tree.k_nearest((100, 100), 1)) == [(99, 99, 99, 99)]\n assert items(tree.k_nearest((50, 50), 1)) == [(50, 50, 50, 50)]\n assert items(tree.k_nearest((-1, -1), 2)) == [(0, 0, 0, 0),\n (1, 1, 1, 1)]" ]
[ "0.69601023", "0.68331033", "0.6460805", "0.6430839", "0.6326999", "0.62910575", "0.628301", "0.6238423", "0.6171075", "0.61605567", "0.615175", "0.61391455", "0.6023545", "0.59705627", "0.5882969", "0.5794502", "0.57871974", "0.5771276", "0.57696116", "0.57694775", "0.5758734", "0.5751191", "0.57458323", "0.5732917", "0.57195973", "0.57116884", "0.5701165", "0.56755227", "0.5660837", "0.5631809" ]
0.8333619
0
Test MesoscopeFOV.register_fov method. Note this doesn't actually hit Alyx. Also this doesn't test stack creation.
def test_register_fov(self):
        task = MesoscopeFOV(self.session_path, device_collection='raw_imaging_data', one=self.one)
        mlapdv = {'topLeft': [2317.2, -1599.8, -535.5], 'topRight': [2862.7, -1625.2, -748.7],
                  'bottomLeft': [2317.3, -2181.4, -466.3], 'bottomRight': [2862.7, -2206.9, -679.4],
                  'center': [2596.1, -1900.5, -588.6]}
        meta = {'FOV': [{'MLAPDV': mlapdv, 'nXnYnZ': [512, 512, 1], 'roiUUID': 0}]}
        with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:
            task.register_fov(meta, 'estimate')
        calls = mock_rest.call_args_list
        self.assertEqual(3, len(calls))
        args, kwargs = calls[1]
        self.assertEqual(('fields-of-view', 'create'), args)
        expected = {'data': {'session': None, 'imaging_type': 'mesoscope', 'name': 'FOV_00', 'stack': None}}
        self.assertEqual(expected, kwargs)
        args, kwargs = calls[2]
        self.assertEqual(('fov-location', 'create'), args)
        expected = ['field_of_view', 'default_provenance', 'coordinate_system', 'n_xyz',
                    'provenance', 'x', 'y', 'z', 'brain_region']
        self.assertCountEqual(expected, kwargs.get('data', {}).keys())
        self.assertEqual(5, len(kwargs['data']['brain_region']))
        self.assertEqual([512, 512, 1], kwargs['data']['n_xyz'])
        self.assertIs(kwargs['data']['field_of_view'], mock_rest().get('id'))
        self.assertEqual('E', kwargs['data']['provenance'])
        self.assertEqual([2317.2, 2862.7, 2317.3, 2862.7], kwargs['data']['x'])
        # Check dry mode with suffix input = None
        for file in self.session_path.joinpath('alf', 'FOV_00').glob('mpciMeanImage.*'):
            file.replace(file.with_name(file.name.replace('_estimate', '')))
        self.one.mode = 'local'
        with unittest.mock.patch.object(self.one.alyx, 'rest') as mock_rest:
            out = task.register_fov(meta, None)
            mock_rest.assert_not_called()
        self.assertEqual(1, len(out))
        self.assertEqual('FOV_00', out[0].get('name'))
        locations = out[0]['location']
        self.assertEqual(1, len(locations))
        self.assertEqual('L', locations[0].get('provenance', 'L'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_f_from_hfov(self):\n width = 700\n height = 480\n hfov = 60\n vfov = 60\n\n # TODO(marcus): make sure these expected values are correct!\n actual = tesse_ros_bridge.utils.fx_from_hfov(hfov, width)\n expected = 606.2177826491071\n self.assertEqual(actual, expected)\n\n actual = tesse_ros_bridge.utils.fy_from_vfov(vfov, height)\n expected = 415.69219381653056\n self.assertEqual(actual, expected)", "def test_vfov_from_hfov(self):\n width = 700\n height = 480\n hfov = 60\n\n # TODO(marcus): make sure these expected values are correct!\n actual = tesse_ros_bridge.utils.vfov_from_hfov(hfov, width, height)\n expected = 43.19696059328124\n self.assertEqual(actual, expected)", "def fov(self, fov):\n self.ptr.fov(fov)", "def test_get_frustum_parameters() -> None:\n fx, fy = 1000, 1000\n img_w = 2000\n img_h = 1000\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n fov_theta_deg = np.rad2deg(pinhole_camera.fov_theta_rad)\n assert np.isclose(fov_theta_deg, 90.0)\n\n # for identity SE(3), the yaw angle is zero radians.\n cam_yaw_ego = pinhole_camera.egovehicle_yaw_cam_rad\n assert np.isclose(cam_yaw_ego, 0)", "def create_cam_fov(self, name):\n\n # Vertices of FOV\n V = [\n (0, 0, -self.SAT_PROPS[\"Alt\"]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 0]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 1]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 2]),\n tuple(self.CAM_PROPS[name][\"Intercepts\"][:, 3])\n ]\n\n # Faces of FOV\n F = [(0, 1, 2), (0, 2, 3), (0, 3, 4), (0, 4, 1)]\n\n # Create building blocks of polydata\n cam = vtk.vtkPolyData()\n points = vtk.vtkPoints()\n polys = vtk.vtkCellArray()\n scalars = vtk.vtkFloatArray()\n\n # Load the point, cell and data attributes\n for i in range(5):\n points.InsertPoint(i, V[i])\n for i in range(4):\n polys.InsertNextCell( self.mkVtkIdList(F[i]))\n for i in range(5):\n scalars.InsertTuple1(i,i)\n\n # Assign the pieces to the vtkPolyData.\n cam.SetPoints(points)\n del points\n cam.SetPolys(polys)\n del polys\n cam.GetPointData().SetScalars(scalars)\n del scalars\n\n # Mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(cam)\n mapper.ScalarVisibilityOff()\n\n # Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0.5, 1, 0.5)\n actor.GetProperty().SetAmbient(0.5)\n actor.GetProperty().SetOpacity(0.1)\n\n return actor", "def setFov(self,fov):\n self.light.node().getLens().setFov(fov)", "def update_fov(self) -> None:\n self.game_map.visible[:] = compute_fov(\n self.game_map.tiles[\"transparent\"],\n (self.player.x, self.player.y),\n radius=8,\n )\n # If a tile is \"visible\" it should be added to \"explored\".\n self.game_map.explored |= self.game_map.visible", "def test_frustum_planes_ring_cam() -> None:\n near_clip_dist = 6.89 # arbitrary value\n\n # Set \"focal_length_x_px_\"\n fx_px = 1402.4993697398709\n\n # Set \"focal_length_y_px_\"\n fy_px = 1405.1207294310225\n\n # Set \"focal_center_x_px_\"\n cx_px = 957.8471720086527\n\n # Set \"focal_center_y_px_\"\n cy_px = 600.442948946496\n\n camera_name = \"ring_front_right\"\n height_px = 1550\n width_px = 2048\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n height_px=height_px,\n width_px=width_px,\n cam_name=camera_name,\n )\n (\n left_plane,\n right_plane,\n near_plane,\n bottom_plane,\n top_plane,\n ) = 
pinhole_camera.frustum_planes(near_clip_dist)\n\n left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])\n right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])\n near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])\n bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])\n top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])\n\n assert np.allclose(\n left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)\n )\n assert np.allclose(\n right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)\n )\n assert np.allclose(\n bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)\n )\n assert np.allclose(\n top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)\n )\n assert np.allclose(near_plane, near_plane_expected)", "def test_simple_pass():\n m = view(nybb)\n m = view(world)\n m = view(cities)\n m = view(world.geometry)", "def test_build_surface_registry(ordered_pvarray):\n\n ordered_pvarray.cast_shadows()\n reg = ordered_pvarray.surface_registry\n\n assert reg.shape[0] == ordered_pvarray.n_surfaces\n assert reg.shape[1] == len(ordered_pvarray.registry_cols)", "def test_voxel_io(self):\n points = np.array([[0.1, 0.1, 0.1], [1.1, 1.1, 1.1], [1.3, 1.2, 1.4]])\n vg = VoxelGrid(0.5, min_corner=Vector3f(0, 0, 0), points=points)\n po = ProjectObject.gen_voxels_object(\n id=\"foobar\", voxels=vg, pose=self.pose, category=\"chair\"\n )\n self.assertIsInstance(self.temp_directory, str)\n po_xml = po.save(self.temp_directory)\n po2 = ProjectObject.load(\"voxels\", po_xml, self.temp_directory)\n\n self.assertTrue(po2.almost_equal(po))", "def createCameraAndPointCloud(mochaFbxFilePath=None):\n if mochaFbxFilePath == None:\n gG()\n else:\n cQ(mochaFbxFilePath)\n cR(mochaFbxFilePath)\n return", "def test_ipam_vrfs_create(self):\n pass", "def fov(self, fov: float):\n assert type(fov) in (int, float)\n self._fov[self.projection_mode.value] = fov\n self._reset_matrix()", "def setup(self):\n self.debug(\"Setup ..\")\n\n if self.pipeline.settings.useHardwarePCF:\n self.error(\n \"Global Illumination does not work in combination with PCF!\")\n import sys\n sys.exit(0)\n return\n\n self.settings = VoxelSettingsManager()\n self.settings.loadFromFile(join(self.sceneRoot, \"voxels.ini\"))\n\n self.debug(\n \"Loaded voxels, grid resolution is\", self.settings.GridResolution)\n\n self.gridScale = self.settings.GridEnd - self.settings.GridStart\n self.voxelSize = self.gridScale / float(self.settings.GridResolution)\n self.entrySize = Vec2(\n 1.0 / float(self.settings.StackSizeX), 1.0 / float(self.settings.StackSizeY))\n self.frameIndex = 0\n\n invVoxelSize = Vec3(\n 1.0 / self.voxelSize.x, 1.0 / self.voxelSize.y, 1.0 / self.voxelSize.z)\n invVoxelSize.normalize()\n self.normalizationFactor = invVoxelSize / \\\n float(self.settings.GridResolution)\n\n # Debugging of voxels, VERY slow\n self.debugVoxels = False\n\n if self.debugVoxels:\n self.createVoxelDebugBox()\n\n # Load packed voxels\n packedVoxels = Globals.loader.loadTexture(\n join(self.sceneRoot, \"voxels.png\"))\n packedVoxels.setFormat(Texture.FRgba8)\n packedVoxels.setComponentType(Texture.TUnsignedByte)\n # packedVoxels.setKeepRamImage(False)\n\n # Create 3D Texture to store unpacked voxels\n self.unpackedVoxels = Texture(\"Unpacked voxels\")\n self.unpackedVoxels.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, 
self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba8)\n self.unpackedVoxels.setMinfilter(Texture.FTLinearMipmapLinear)\n self.unpackedVoxels.setMagfilter(Texture.FTLinear)\n\n self.unpackVoxels = NodePath(\"unpackVoxels\")\n self.unpackVoxels.setShader(\n BetterShader.loadCompute(\"Shader/GI/UnpackVoxels.compute\"))\n\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"packedVoxels\", packedVoxels)\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"stackSizeX\", LVecBase3i(self.settings.StackSizeX))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"gridSize\", LVecBase3i(self.settings.GridResolution))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"destination\", self.unpackedVoxels)\n print \"executing shader ..\"\n self._executeShader(\n self.unpackVoxels, self.settings.GridResolution / 8, self.settings.GridResolution / 8, self.settings.GridResolution / 8)\n\n print \"creating direct radiance texture ..\"\n # Create 3D Texture to store direct radiance\n self.directRadianceCache = Texture(\"Direct radiance cache\")\n self.directRadianceCache.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TInt, Texture.FR32i)\n\n self.directRadiance = Texture(\"Direct radiance\")\n self.directRadiance.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba16)\n\n print \"setting texture states ..\"\n for prepare in [self.directRadiance, self.unpackedVoxels]:\n prepare.setMagfilter(Texture.FTLinear)\n prepare.setMinfilter(Texture.FTLinearMipmapLinear)\n prepare.setWrapU(Texture.WMBorderColor)\n prepare.setWrapV(Texture.WMBorderColor)\n prepare.setWrapW(Texture.WMBorderColor)\n prepare.setBorderColor(Vec4(0,0,0,1))\n\n self.unpackedVoxels.setBorderColor(Vec4(0))\n # self.directRadiance.setBorderColor(Vec4(0))\n\n self.populateVPLNode = NodePath(\"PopulateVPLs\")\n self.clearTextureNode = NodePath(\"ClearTexture\")\n self.copyTextureNode = NodePath(\"CopyTexture\")\n self.generateMipmapsNode = NodePath(\"GenerateMipmaps\")\n self.convertGridNode = NodePath(\"ConvertGrid\")\n\n\n if False:\n surroundingBox = Globals.loader.loadModel(\n \"Models/CubeFix/Model.egg\")\n surroundingBox.setPos(self.settings.GridStart)\n surroundingBox.setScale(self.gridScale)\n\n # surroundingBox.setTwoSided(True)\n surroundingBox.flattenStrong()\n surroundingBox.reparentTo(Globals.render)\n\n self.bindTo(self.populateVPLNode, \"giData\")\n self.reloadShader()\n\n self._generateMipmaps(self.unpackedVoxels)", "def test_register_route_factory():\n\n current_factory = application_services.get_current_route_factory()\n application_services.register_route_factory(mock_route_factory)\n assert application_services.get_current_route_factory() == mock_route_factory\n application_services.register_route_factory(current_factory)", "def set_camera_fov(args_, client_, new_fov):\n\n args_.camera_bp.set_attribute(\"fov\", \"%s\" % new_fov)\n args_.camera_depth_bp.set_attribute(\"fov\", \"%s\" % new_fov)\n\n # destroy the original actor and make a new camera object\n args_.rgb_camera.camera_actor.stop()\n args_.depth_camera.camera_actor.stop()\n commands_ = [\n # destroy the previous actor first\n carla.command.DestroyActor(args_.depth_camera.camera_actor.id),\n carla.command.DestroyActor(args_.rgb_camera.camera_actor.id),\n # spawn the new actor\n carla.command.SpawnActor(\n args_.camera_bp, carla.Transform(), 
args_.spectator),\n carla.command.SpawnActor(\n args_.camera_depth_bp, carla.Transform(), args_.spectator),\n ]\n response_ = client_.apply_batch_sync(commands_)\n camera_actor_ids_ = [r.actor_id for r in response_[-2:]]\n camera_, camera_depth_ = world.get_actors(\n camera_actor_ids_)\n\n args_.rgb_camera = Camera(camera_, width=args_.width,\n height=args_.height,\n fov=new_fov,\n camera_type=\"rgb\")\n\n args_.depth_camera = Camera(\n camera_depth_, camera_type=\"depth\")\n\n args_.prev_camera_fov = new_fov", "def testForwardProjection(self):\n if GlobalConfiguration.INDIVIDUAL_TEST:\n self.skipTest('Skip Projection Op -- Forward Projection Test')\n image_size = [480, 640]\n vox_size = [30, 18, 30]\n batch_size = 2\n samples = GlobalConfiguration.read_test_data(batch_size, 8)\n inputs, outputs, sess = self._ForwardProjection(batch_size, image_size, vox_size)\n feed_dict = {inputs[0]: samples[0], inputs[1]: samples[2]}\n outputs = sess.run(outputs, feed_dict=feed_dict)\n cur_index = 0\n for img_vox_map, img_proj_pos, vox_occupied in zip(*outputs):\n # img_vox_map & vox_occupied cross validation\n vox_vox_occupied = np.reshape(vox_occupied, newshape=[-1])\n img_vox_indices, img_vox_counts = np.unique(img_vox_map, return_counts=True)\n img_vox_counts = np.delete(img_vox_counts, np.where(img_vox_indices == -1), axis=0)\n img_vox_indices = np.delete(img_vox_indices, np.where(img_vox_indices == -1), axis=0)\n img_vox_indices = img_vox_indices.astype(np.uint32)\n vox_indices = np.argwhere(vox_vox_occupied > 0)\n self.assertTrue(np.array_equal(img_vox_indices, np.reshape(vox_indices, newshape=[-1])))\n self.assertTrue(np.array_equal(img_vox_counts, np.reshape(vox_vox_occupied[vox_indices], newshape=[-1])))\n # img_proj_pos visual validation\n img_proj_pos = img_proj_pos[img_proj_pos[:, :, 2] > 0]\n img_proj_pos = img_proj_pos / 0.02\n img_proj_pos = img_proj_pos - np.min(img_proj_pos, axis=0)\n bins = np.ceil(np.max(img_proj_pos, axis=0))\n hist, _ = np.histogramdd(img_proj_pos, bins=bins)\n if os.path.exists(GlobalConfiguration.Visualization_Dir):\n saving_dir = os.path.join(GlobalConfiguration.Visualization_Dir, 'forward_projection')\n if not os.path.exists(saving_dir):\n os.mkdir(saving_dir)\n vis_model_path = os.path.join(saving_dir, 'camera_pose_view_%d' % cur_index)\n hist = np.expand_dims(hist, axis=-1)\n sparse_indices, _ = visualize.cond_sparse_represent(hist, lambda x: x > 0, color_norm=False)\n visualize.sparse_vox2ply(sparse_indices, hist.shape[:-1], name=vis_model_path)\n vis_model_path = os.path.join(saving_dir, 'volume_pose_view_%d' % cur_index)\n visualize.sparse_vox2ply(np.expand_dims(img_vox_indices, axis=-1), vox_size,\n 1, np.expand_dims(img_vox_counts, axis=-1), face=True, name=vis_model_path)\n cur_index += 1", "def HasFOV(self):\n return _gmat_py.Hardware_HasFOV(self)", "def plot_rectangular_FOV(ax, exp='cm'):\n params = {'lon': {'cm': [50, 200], 'argo': [25, 100], '40': [285, 320]},\n 'color': {'cm':'magenta', 'argo': 'green', '40':'green'},\n #'x': {'cm': 150, 'argo': 75, '40':-50},\n 'x': {'cm': 165, 'argo': 90, '40':-50},\n 'y': {'cm': 7, 'argo': 7, '40':-55.5},\n 'label': {'cm': 'CASA-MIA', 'argo': 'ARGO-YBJ', '40':'IC-40'}}\n\n lon1 = move_gc_to_center(params['lon'][exp][0]*np.pi/180.)*180./np.pi\n lon2 = move_gc_to_center(params['lon'][exp][1]*np.pi/180.)*180./np.pi\n ax.plot([lon1, lon1], [-5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon2, lon2], [-5, 5], lw=2,\n c=params['color'][exp], ls='--')\n\n if exp == 'cm':\n ax.plot([lon2, -180], [-5, -5], 
lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon2, -180], [5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon1, 180], [5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon1, 180], [-5, -5], lw=2,\n c=params['color'][exp], ls='--')\n else:\n lon1 += 1.9\n ax.plot([lon1, lon2], [-5, -5], lw=2,\n c=params['color'][exp], ls='--')\n ax.plot([lon1, lon2], [5, 5], lw=2,\n c=params['color'][exp], ls='--')\n ax.text(x=params['x'][exp], y=params['y'][exp],\n s=params['label'][exp], color=params['color'][exp],\n fontsize=40, fontweight='bold')", "def test_world(self):\n f = AvatarFactory('world')\n self.assertEqual(f.world, 'world')", "def test_convex_init(self):\n print(\"Convex_Init\")\n finder = dc.dock.ConvexHullPocketFinder()", "def runTest(self):\n self.setUp()\n self.test_visuThreeD1()", "def test_marker_camera_interface() -> None:\n assert MarkerCamera.interface_class() == MarkerCameraInterface", "def test_marker_camera_interface_implementation() -> None:\n MockMarkerCameraDriver()", "def test_ipam_vrfs_update(self):\n pass", "def test_generate_frustum_planes_stereo() -> None:\n near_clip_dist = 3.56 # arbitrary value\n\n # Set \"focal_length_x_px_\"\n fx_px = 3666.534329132812\n\n # Set \"focal_length_y_px_\"\n fy_px = 3673.5030423482513\n\n # Set \"focal_center_x_px_\"\n cx_px = 1235.0158218941356\n\n # Set \"focal_center_y_px_\"\n cy_px = 1008.4536901420888\n\n camera_name = \"stereo_front_left\"\n height_px = 1550\n width_px = 2048\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n height_px=height_px,\n width_px=width_px,\n cam_name=camera_name,\n )\n (\n left_plane,\n right_plane,\n near_plane,\n bottom_plane,\n top_plane,\n ) = pinhole_camera.frustum_planes(near_clip_dist)\n\n left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])\n right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])\n near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])\n bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])\n top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])\n\n assert np.allclose(\n left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)\n )\n assert np.allclose(\n right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)\n )\n assert np.allclose(\n bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)\n )\n assert np.allclose(\n top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)\n )\n assert np.allclose(near_plane, near_plane_expected)", "def test___init__(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n self.assertTrue(hasattr(ga, 'register'))\n\n # should have called evalute\n self.assertEqual(ga.generations[-1].new, 0)\n\n # should have registered a default ranking function\n self.assertEqual(np.round(np.sum(ga.rank())), len(f0))", "def test_voxel(self):\n for m in [g.get_mesh('featuretype.STL'),\n g.trimesh.primitives.Box(),\n g.trimesh.primitives.Sphere()]:\n for pitch in [.1, .1 - g.tol.merge]:\n surface = m.voxelized(pitch=pitch)\n\n # make sure the voxelized pitch is similar to passed\n assert g.np.allclose(surface.pitch, pitch)\n\n for fill_method in ('base', 'orthographic'):\n solid = surface.copy().fill(method=fill_method)\n\n assert len(surface.encoding.dense.shape) == 3\n assert surface.shape == surface.encoding.dense.shape\n assert surface.volume > 
0.0\n\n assert isinstance(surface.filled_count, int)\n assert surface.filled_count > 0\n\n box_surface = surface.as_boxes()\n box_solid = solid.as_boxes()\n\n assert isinstance(box_surface, g.trimesh.Trimesh)\n assert abs(box_solid.volume - solid.volume) < g.tol.merge\n\n assert g.trimesh.util.is_shape(\n surface.sparse_indices, (-1, 3))\n assert len(\n solid.sparse_indices) >= len(\n surface.sparse_indices)\n assert solid.sparse_indices.shape == solid.points.shape\n outside = m.bounds[1] + m.scale\n for vox in surface, solid:\n assert vox.sparse_indices.shape == vox.points.shape\n assert g.np.all(vox.is_filled(vox.points))\n assert not vox.is_filled(outside)\n\n try:\n cubes = surface.marching_cubes\n assert cubes.area > 0.0\n except ImportError:\n g.log.info('no skimage, skipping marching cubes test')\n\n g.log.info('Mesh volume was %f, voxelized volume was %f',\n m.volume,\n surface.volume)", "def testMazeExists(self):\n pass" ]
[ "0.61097986", "0.6080804", "0.57732624", "0.57588035", "0.55853933", "0.54319566", "0.5408116", "0.5356413", "0.53268194", "0.5303767", "0.5239329", "0.5229449", "0.52219445", "0.51999205", "0.5182848", "0.5173113", "0.51626253", "0.51581705", "0.5127742", "0.50777745", "0.5049118", "0.50465536", "0.50379044", "0.50232553", "0.4993731", "0.49597096", "0.4956448", "0.49447432", "0.492808", "0.4923488" ]
0.8358435
0
Draws the optimal route/path AFTER the algorithm has found it
def draw_best_route(final_route):
    shape('turtle')
    fillcolor('purple')
    pencolor('purple')
    pensize(4)
    speed(1)
    # Finds the start position of the node in the graphical grid
    start_pos_x = (final_route[0].x) * 30
    start_pos_y = (final_route[0].y - 1) * -30
    penup()
    # Sets the start position of the drawn path in the middle of the start node
    setpos(-500 + start_pos_x + 15, 200 + start_pos_y - 15)
    pendown()
    # Draws right, left, down or up based on the position of the next node in the list
    for i in range(0, len(final_route) - 1):
        if final_route[i].x < final_route[i + 1].x:
            goto(xcor() + 30, ycor())
        elif final_route[i].x > final_route[i + 1].x:
            goto(xcor() - 30, ycor())
        elif final_route[i].y < final_route[i + 1].y:
            goto(xcor(), ycor() - 30)
        else:
            goto(xcor(), ycor() + 30)
    done()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_path(self):\n #Se repite el ciclo para el número especificado de veces\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Seleccion aleatoria del nodo a visitar\n node_to_vist = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Mover la hormiga al siguiente nodo seleccionado al azar\n ant.move_ant(node_to_visit)\n #Compruebe si se ha alcanzado la solución\n ant.is_final_node_reached()\n #Agregar la ruta resultante a la lista de rutas\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Habilitar a la hormiga para otra busqueda\n ant.enable_start_new_path()\n \n # Actualizar el nivel global de feromonas\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Vaciar la lista de rutas\n self.empty_paths()\n print('Iteration: ', i, 'lenght of the path: ', len(self.best_result))\n return self.best_result", "def drawpath(self,obstacles):\n for i in obstacles:\n self.distance_map[i[0],i[1]]=44\n print(\"Distance map\")\n print(self.distance_map)\n for i in self.footprint:\n self.distance_map[i[0],i[1]]=88\n print(\"Evaluated path\")\n print(self.distance_map)", "def plan_path(self, start_point, end_point, map_obj):\n # STUFF FOR TESTING \n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n self.vis_pub.publish(marker)\n \n exploration_bias = 1.0 - self.goal_bias\n final_node = None\n num_existing_path_points_added = 0\n \n self.rrt_star = RRTStar(Node(start_point))\n self.max_iterations = self.rrt_star.max_size\n while self.rrt_star.size <= self.max_iterations:\n p = np.random.uniform()\n if p < exploration_bias:\n \n x_rand = self.map.sample_free_space()\n else:\n if final_node is None:\n x_rand = end_point\n else:\n x_rand = self.branched_from_existing_path(\n final_node,\n depth_underestimate=num_existing_path_points_added\n )\n num_existing_path_points_added += 1\n\n x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path 
found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()", "def create_path(self): # weights, path_data, show_path=True):\n\n # control variables\n max_distance_to_pf = np.linalg.norm(np.subtract(\n self.pf, np.sum([self.p0, self.d2*self.l2/2], axis=0)))\n current_position_index = [0, int(len(self.lines_list)/2)] # X, Y\n final_position_index = [\n len(self.lines_list[0]) - 1, int(len(self.lines_list)/2)] # X, Y\n points_displaced = 0\n max_fishing = 0\n\n # main path, which goes directly from initial to final points\n for i in range(len(self.lines_list[0])):\n self.x_list_main.append(\n self.lines_list[int(len(self.lines_list)/2)][i][\"position\"][0])\n self.y_list_main.append(\n self.lines_list[int(len(self.lines_list)/2)][i][\"position\"][1])\n self.fish_prob_list_main .append(\n self.lines_list[int(len(self.lines_list)/2)][i][\"fish\"])\n\n path_index = [[current_position_index[0], current_position_index[1]]]\n\n self.fish_prob_list.append(\n self.lines_list[current_position_index[1]][current_position_index[0]][\"fish\"])\n\n while current_position_index != final_position_index:\n score = -float('inf')\n max_score = -float('inf')\n a_max = 0\n b_max = 0\n for a in range(-1, 2, 1): # self.d2 values\n for b in range(0, 2, 1): # self.d1 values\n if a == 0 and b == 0:\n continue # we should always move\n elif [current_position_index[0] + b, current_position_index[1] + a] not in path_index:\n # we get the next point data from the self.lines_list variables\n try:\n values = self.lines_list[current_position_index[1] +\n a][current_position_index[0] + b]\n score = self.weights[\"fish\"] * values[\"fish\"] \\\n - self.weights[\"straight_line_distance\"] * values[\"distance_to_l1\"] / (self.l2/2) \\\n - self.weights[\"final_point_distance\"] * \\\n values[\"distance_to_pf\"] / max_distance_to_pf\n #+ self.weights[\"fuel\"] * values[\"fuel\"] + self.weights[\"area\"] * values[\"area\"]\n\n except IndexError:\n # Position not reachable\n continue\n\n if score > max_score:\n max_score = score\n max_fishing = values[\"fish\"]\n a_max = a\n b_max = b\n\n current_position_index[0] += b_max # X\n current_position_index[1] += a_max # Y\n path_index.append([current_position_index[0],\n current_position_index[1]]) # X Y\n\n self.fish_prob_list.append(max_fishing)\n\n points_displaced += 1\n\n step_filter = 3\n paths_coincide = True\n path_index_filtered = []\n index_to_insert = []\n counter = 0\n\n # filter 
points\n while True:\n\n if counter % step_filter == 0 or path_index[counter] == path_index[-1]:\n if len(path_index) - counter < step_filter and not path_index[counter] == path_index[-1]:\n step_filter = len(path_index) - counter\n\n try:\n index_to_insert = []\n paths_coincide = True\n for i in range(counter, counter+step_filter, 1):\n paths_coincide = (\n path_index[i][1] == path_index[i+1][1]) and paths_coincide\n index_to_insert.append(path_index[i])\n\n except Exception:\n paths_coincide = False\n\n if paths_coincide:\n for index in index_to_insert:\n path_index_filtered.append(index)\n\n for j in range(counter, counter+step_filter, 1):\n self.fish_prob_list_filtered.append(\n self.fish_prob_list[j])\n\n counter += step_filter\n\n else:\n path_index_filtered.append(path_index[counter])\n self.fish_prob_list_filtered.append(\n self.fish_prob_list[counter])\n counter += 1\n\n else:\n counter += 1\n\n if counter >= len(path_index):\n break\n\n # we pass from index to real positions\n for index in path_index:\n self.x_list.append(\n self.lines_list[index[1]][index[0]][\"position\"][0])\n self.y_list.append(\n self.lines_list[index[1]][index[0]][\"position\"][1])\n\n for index in path_index_filtered:\n self.x_list_filtered.append(\n self.lines_list[index[1]][index[0]][\"position\"][0])\n self.y_list_filtered.append(\n self.lines_list[index[1]][index[0]][\"position\"][1])\n\n return [[self.x_list_main, self.y_list_main, 'green'], [self.x_list, self.y_list, 'red'],\n [self.x_list_filtered, self.y_list_filtered, 'blue']], \\\n [self.fish_prob_list, self.fish_prob_list_filtered,\n self.fish_prob_list_main]", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n 
XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def plan_path(self, msg):\n # Request the map\n # In case of error, return an empty path\n mapdata = PathPlanner.request_map()\n\n if mapdata is None:\n return Path()\n # Calculate the C-space and publish it\n cspacedata = self.calc_cspace(mapdata, 3)\n # Execute A*\n start = PathPlanner.world_to_grid(mapdata, msg.start.pose.position)\n goal = PathPlanner.world_to_grid(mapdata, msg.goal.pose.position)\n \n path = self.a_star(cspacedata, start, goal) #, self.c_space_array, self.frontier, self.expanded)\n \n # Optimize waypoints\n waypoints = PathPlanner.optimize_path(path)\n # print waypoints\n waypoints.remove(waypoints[0])\n # print waypoints\n\n self.path_pub.publish(self.path_to_message(cspacedata, waypoints))\n # Return a Path message\n return self.path_to_message(cspacedata, waypoints)", "def path_search(start, goal):\n if start == goal:\n return [start]\n explored = {}\n explored[start] = 2\n queue = [ [start, ('', 0)] ]\n bestPath = [start, ('', 1110)]\n bestPathList = []\n total = 0\n costSearchingNow = 0\n while queue:\n total += 1\n # if total>40000:\n # return -1,' fail'\n if queue[0][-1][-1] != costSearchingNow:\n \tqueue.sort(key=lambda path:path[-1][-1])\n \n path = queue.pop(0)\n costSearchingNow = path[-1][-1]\n s = path[-2]\n # print len(queue)\n # cout(path)\n # print queue\n\n if s == goal:\n bestPath = path\n # print 'Find one best path ↑'\n bestPathList.append(bestPath)\n if len(queue)==0:\n # print '~~~~',total,getString \n return total,getString(bestPathList,start,goal)\n else:\n if path[-1][-1] > bestPath[-1][-1]:\n return total,getString(bestPathList,start,goal)\n\n linenum, changetimes = path[-1]\n \n for state, actions in sh_subway[s].items():\n for action in actions:\n linechange = changetimes + 1\n if linenum != action:\n linechange += changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if (path2[-1][-1]-len(path2)/2-1)/changePunishment <= 4:\n if len(path2)>6:\n if (path2[-2] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-6]=='马陆') or (path2[-6] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-2]=='马陆') or (path2[-2] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-6]=='水城路') or 
(path2[-6] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-2]=='水城路'):\n linechange -= changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if path2.count(state)<=1:\n if state not in explored:\n explored[state] = linechange\n queue.append(path2)\n \n elif linechange <= explored[state]+changePunishment: # 考虑马上到终点\n \n explored[state] = linechange\n queue.append(path2)\n\n\n return total,getString(bestPathList,start,goal)", "def update(self):\n if self.iterations <= self.iterationCounter:\n self.view.stopView()\n self.view.drawBestPath(self.bestPath)\n return\n actualDistance = self.model.distance(self.view.lines_id)\n if actualDistance < self.bestDistance:\n self.bestDistance = actualDistance\n self.bestPath = self.model.coordinates[:]\n\n if self.view.running:\n self.model.shuffle()\n self.view.deleteConnections()\n self.view.connectCities(self.model.coordinates)\n self.view.set_refresh(self.refresh_time)\n self.iterationCounter += 1\n self.view.updateLabels(self.iterationCounter, self.bestDistance)", "def determineNextMove(player_location, opponentLocation, coins):\n global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n if opponentLocation in coins_to_search:\n coins_to_search, meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n index = 0\n elif currentcoin == player_location: \n if len(route) != 0:\n old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n \t\n #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n #for c in coins_to_search:\n #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n # coins_to_search.remove(c)\n #break\n \t\t\n #best_weight = float(\"inf\")\n #best_path = []\n #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n #meta_route2 = [player_location] + best_path\n #route2 = u.location_list_to_route(meta_route2, route_matrix)\n #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n if len(route) == 0 or old_dist - new_dist > 3:\n route = route2\n meta_route = meta_route2 \n index = 0\n index += 1\n currentcoin = meta_route[index]\n #api.debug(route)\n return u.direction(player_location, route.pop(0))", "def handle_solution(node, start_sq):\n final_route = []\n while True: # Find the best path by backtracking through all the parents, starting with the goal node\n final_route.insert(0, node)\n if node == start_sq:\n break\n node = node.parent\n print('Best path from A to B:')\n print_list(final_route)\n draw_best_route(final_route)", "def visualize_routes(self):\n visualize_tsp.plotTSP([self.best_solution], self.coords)", "def execute(self, pathfinder: Callable) -> None:\n if not self.draw_explored.is_alive() and not self.draw_result.is_alive():\n self.scene.clear_path() # Remove currently-drawn path tiles\n start = time()\n result, explored = pathfinder(self.scene.start, self.scene.goal, self.scene, self.heuristic)\n end = time()\n\n # self.text.setPlainText(f\"Time taken: {round(end - start, 4)}s\\nNodes visited: {len(explored)}\\nNodes in path: {len(result)}\")\n # self.scene.addItem(self.text)\n log = f\"Time taken: {round(end - start, 4)}s\\nNodes visited: {len(explored)}\\nNodes in path: 
{len(result)}\"\n self.scene.set_text_log(log)\n \n if explored != []:\n # Draws explored tiles. Multi-threading used for tiles to draw sequentially\n self.draw_explored = threading.Thread( \\\n target=self.scene.draw_cell_sequence, \\\n args=(explored, CellType.searched, True), \\\n daemon = True\n )\n self.draw_explored.start()\n\n if result != []:\n # Draws path. Multi-threading used for path to draw sequentially\n # self.draw_explored is fed as 'prev_thread' so that path is drawn after explored tiles\n self.draw_result = threading.Thread( \\\n target=self.scene.draw_cell_sequence, \\\n args=(result, CellType.path, True, self.draw_explored), \\\n daemon = True\n )\n self.draw_result.start()\n\n self.scene.set_cell(self.scene.start, Cell(val = CellType.start))\n self.scene.set_cell(self.scene.goal, Cell(val = CellType.goal))\n self.scene.color_cell(self.scene.start)\n self.scene.color_cell(self.scene.goal)", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i + 1, j + 1):\n self.exchange(i, j)\n improve = True", "def generate_possible_paths(self, obstacle):\n if self.does_uav_intersect_obstacle_vertically(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):\n if self.does_path_intersect_obstacle_2d(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):\n new_attempt_pos_points = [\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0], obstacle.get_point()[1] + obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0], obstacle.get_point()[1] - obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)]\n ]\n\n new_paths = []\n for new_pos_point in new_attempt_pos_points:\n if not self.does_path_intersect_obstacle_3d(obstacle, self.drone.get_point(), new_pos_point) and self.flight_boundary.is_point_in_bounds(new_pos_point):\n for recursive_new_pos_point in new_attempt_pos_points:\n if self.flight_boundary.is_point_in_bounds(recursive_new_pos_point) and abs(recursive_new_pos_point[2] - new_pos_point[2]) < 5:\n if recursive_new_pos_point[0] != new_pos_point[0] or recursive_new_pos_point[1] != new_pos_point[1]:\n if not self.does_path_intersect_obstacle_3d(obstacle, new_pos_point, recursive_new_pos_point) and not self.does_path_intersect_obstacle_3d(obstacle, 
recursive_new_pos_point, self.drone.get_waypoint_holder().get_current_waypoint()):\n new_paths.append([new_pos_point, recursive_new_pos_point])\n\n # Uncomment for DEBUGGING ONLY\n for path in new_paths:\n print(\"Point:\", str(path))\n\n return new_paths\n\n return []", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def updatePath(self):\n logging.debug(\"ShortestPathUI.updatePath function started\")\n error = False\n fromVertex = self.fromLineEdit.text()\n toVertex = self.toLineEdit.text()\n graphVertices = self.controller.getVertices()\n if fromVertex not in graphVertices:\n self.__drawErrorInfo(self.fromLineEdit)\n error = True\n if toVertex not in 
graphVertices:\n self.__drawErrorInfo(self.toLineEdit)\n error = True\n if not error:\n self.pathLineEdit.setText(\"\")\n self.lengthLabel.setText(\"\")\n path, length = self.controller.calculateShortestPath(fromVertex, toVertex)\n path = \" -- \".join(path)\n self.pathLineEdit.setText(f\"{path}\")\n self.lengthLabel.setText(f\"{length}\")\n\n logging.debug(\"ShortestPathUI.updatePath function ended\\n\")", "def draw(self):\n spacing = 50\n # # Pygame Setup # #\n # calculate how wide and tall it needs to be\n width = (self.num_hidden_layers + 3) * spacing * 2\n values = [self.num_input_nodes, self.num_hidden_nodes, self.num_output_nodes]\n values.sort(reverse=True)\n height = (values[0] + 1) * spacing\n pygame.init()\n screen = pygame.display.set_mode([width, height])\n pygame.display.set_caption(\"Genetic Path Finding\") # name of the window created\n clock = pygame.time.Clock() # used to manage how fast the screen updates\n myfont = pygame.font.Font(None, 12) # sets the font for text in pygame\n drawing = True\n while drawing:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return\n screen.fill((255, 255, 255))\n\n h_percentile = height - spacing\n w_percentile = (width - (spacing * 2)) / (self.num_hidden_layers + 2)\n # Nodes\n for node in range(self.num_input_nodes):\n pos = h_percentile / (self.num_input_nodes + 1)\n gg.draw_circle(screen, (105, 105, 105), self.node_pos(spacing, 'input', 1, node), 5, aa=True)\n for layer in range(self.num_hidden_layers):\n for node in range(self.num_hidden_nodes):\n pos = h_percentile / (self.num_hidden_nodes + 1)\n bias = self.biases[layer][node]\n color = gg.color_gradient(math.tanh(bias))\n gg.draw_circle(screen, color, self.node_pos(spacing, 'hidden', layer, node), 5, aa=True)\n for node in range(self.num_output_nodes):\n pos = h_percentile / (self.num_output_nodes + 1)\n bias = self.biases[-1][node]\n color = gg.color_gradient(math.tanh(bias))\n gg.draw_circle(screen, color, self.node_pos(spacing, 'output', 1, node), 5, aa=True)\n\n # Connections\n for inp in range(self.num_input_nodes):\n for node in range(self.num_hidden_nodes):\n weight = self.weights[0][node][inp]\n color = gg.color_gradient(math.tanh(weight))\n pygame.draw.aaline(screen, color, self.node_pos(spacing, 'input', 1, inp),\n self.node_pos(spacing, 'hidden', 0, node))\n for layer in range(0, self.num_hidden_layers - 1):\n for node in range(self.num_hidden_nodes):\n for other in range(self.num_hidden_nodes):\n weight = self.weights[layer + 1][other][node]\n color = gg.color_gradient(math.tanh(weight))\n pygame.draw.aaline(screen, color, self.node_pos(spacing, 'hidden', layer, node),\n self.node_pos(spacing, 'hidden', layer + 1, other))\n for node in range(self.num_hidden_nodes):\n for out in range(self.num_output_nodes):\n layer = self.num_hidden_layers\n weight = self.weights[layer][out][node]\n color = gg.color_gradient(math.tanh(weight))\n pygame.draw.aaline(screen, color, self.node_pos(spacing, 'hidden', layer - 1, node),\n self.node_pos(spacing, 'output', 1, out))\n\n pygame.display.flip()\n clock.tick(20)", "def getBestPath(self):\n if self._bestPathVertex.getNextWaypoint() is None:\n numWaypointsCompleted = len(self._waypoints)\n quality = 2\n if self._vertexQueue.isEmpty():\n quality += 1\n else:\n numWaypointsCompleted = self._bestPathVertex.getNextWaypoint().getIndex()\n quality = 1\n if self._vertexQueue.isEmpty():\n quality -= 1\n \n return outputPath.generatePath(self._bestPathVertex, self._params.waypointAcceptanceRadii, quality, 
numWaypointsCompleted)", "def action(self):\n\n # assume the smart opponent can always choose the best step\n # Depth First Search\n steps = 2\n stack = [(self.game_in_head, (), 0)]\n maxmin = None\n good_paths = []\n\n while len(stack) > 0:\n parent_node, path, score = stack.pop(-1)\n if len(path) >= steps*2:\n \n # leaf node in the search tree\n if maxmin is None:\n maxmin = score\n good_paths.append(path)\n elif maxmin == score:\n good_paths.append(path)\n elif maxmin < score:\n maxmin = score\n good_paths.clear()\n good_paths.append(path)\n else:\n # root node, find its leaves\n children_nodes = self.one_step_infe(parent_node, path, score)\n stack += children_nodes\n\n path_dec = random.choice(good_paths) \n if self.colour == 'upper':\n return path_dec[0] \n elif self.colour == 'lower':\n return path_dec[1]", "def _plot(self):\n\n #self.best_canvas.Clear()\n self.current_canvas.Clear()\n\n if len(self.results) > 0:\n x_max = self.results[-1][2]\n #self.best_canvas.xSpec = (0, x_max)\n self.current_canvas.xSpec = (0, x_max)\n\n # best_points = [(r.time, r.best.distance) for r in self.results\n # if r.best is not None and\n # isinstance(r.best.distance, int)]\n # best_line = PolyLine(best_points)\n # best_plot = PlotGraphics([best_line],\n # title='Best path distance over time',\n # xLabel='Time [ns]', yLabel='Distance')\n\n current_points = [self.TopLevelParent.solver_view.tsp_view._points[x] for x in self.results[-1][0]] if len(self.results) > 0 else []\n # current_points = [(r[2], r[0]) for r in self.results]\n if len(current_points) > 0:\n current_line = PolyLine(current_points)\n current_plot = PlotGraphics([current_line],\n title='Current path distance over time',\n xLabel='Iter', yLabel='Score')\n\n #self.best_canvas.Draw(best_plot)\n self.current_canvas.Draw(current_plot)", "def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = 
(prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]", "def search_path(self):\n\n nodes = [self.start]\n final_node = None\n \n count = 0\n while True:\n count += 1\n\n if count % self.pick_target == 0:\n pick = self.goal.pos[:2]\n else:\n pick = self.car.random_pos()[:2]\n \n nearest = self.get_nearest_node(nodes, pick)\n\n if count % self.check_dubins == 0:\n solutions = self.dubins.find_tangents(nearest.pos, self.goal.pos)\n dubins_route, cost, valid = self.dubins.best_tangent(solutions)\n \n if valid:\n final_node = nearest\n break\n\n phi = self.get_steering_angle(nearest.pos, pick)\n pos = nearest.pos\n branch = [pos[:2]]\n \n for i in range(self.max_steps):\n pos = self.car.step(pos, phi)\n branch.append(pos[:2])\n \n # check safety of route-----------------------\n if phi == 0:\n safe = self.dubins.is_straight_route_safe(nearest.pos, pos)\n else:\n d, c, r = self.car.get_params(nearest.pos, phi)\n safe = self.dubins.is_turning_route_safe(nearest.pos, pos, d, c, r)\n # --------------------------------------------\n \n if not safe:\n continue\n \n new_node = Node(pos, phi, i+1)\n \n if new_node in nodes:\n continue\n \n new_node.branch = branch\n new_node.parent = nearest\n nodes.append(new_node)\n \n route = self.backtracking(final_node) + dubins_route\n path = self.car.get_path(self.car.start_pos, route)\n print('Total iteration:', count)\n \n return path, nodes", "def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n 
self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])", "def solve(self):\n while self.character.path[-1] != 88:\n n = self.next_move()\n if n is None:\n self.character.path += ['Error: Could not find full path (budget does not suffice or unreachable).']\n break\n self.character.path += [n]\n self.updated_occupied_locations()\n self.currentTurn += 1", "def a_star_planning(start_x, start_y, goal_x, goal_y, id):\n # extract the index of start node, goal node and obstacles\n start = Point(round(start_x/grid_size), round(start_y/grid_size), 0.0, -1, [0,0,0])\n goal = Point(round(goal_x/grid_size), round(goal_y/grid_size), 0.0, -1, [0,0,0])\n if not_legal(goal, id):\n print ('not a legal goal')\n return False\n \n # time.sleep(10)\n\n # create the open list and close list to store nodes\n openset, closeset = deque(), deque()\n openset.append(start)\n\n while True:\n # find out the min f node to explore\n\n current_node = min(openset,\n key=lambda node: node.g + calculate_heuristic(node,goal))\n\n # pltplt.plot(current_node.x, current_node.y, \"b*\")\n if len(closeset) % 10 == 0:\n plt.pause(0.001)\n\n if current_node.x == goal.x and current_node.y == goal.y:\n print(\"Congratulations! 
You have found the goal!\")\n goal.parent = current_node\n break\n\n # Remove it from the open list\n openset.remove(current_node)\n # Add it to the close list\n closeset.append(current_node)\n\n # Explore the neighbour\n for motion in motions:\n if motion == current_node.parent_motion:\n turn_cost = 0\n elif (motion[0] == -1 * current_node.parent_motion[0]) and (motion[1] == -1 * current_node.parent_motion[1]):\n turn_cost = 1.5\n else:\n turn_cost = 1\n\n node = Point(current_node.x + motion[0],\n current_node.y + motion[1],\n current_node.g + motion[2] + turn_cost,\n current_node,\n motion,\n )\n\n # ignore it if it is in the close list\n flag = False\n for item in closeset:\n if item.x == node.x and item.y == node.y:\n flag = True\n break\n if flag:\n continue\n # ignore it if it is obstacle\n\n if not_legal(node, id):\n continue\n # update its parent if it is the open list\n flag = True\n for item in openset:\n if item.x == node.x and item.y == node.y:\n flag = False\n # if closer, update the parent\n if node.g <= item.g:\n item.g = node.g\n item.parent = node.parent\n item.parent_motion = node.parent_motion\n break\n # add to the open list if it is not in the open list\n if flag:\n openset.append(node)\n\n # generate the final path\n while True:\n route = deque()\n route.append(goal)\n plt.plot(goal.x, goal.y, \"rx\")\n if goal.parent == -1:\n break\n else:\n goal = goal.parent\n route.appendleft(goal)\n # return route\n # return False\n if NEED_DRAW:\n # draw map\n for i in range(map.gridwidth):\n for j in range(map.gridheight):\n if map.grid[1,i,j] >0:\n plt.plot(i, j, \"xc\")\n\n plt.plot(start.x, start.y, \"ro\")\n plt.plot(goal.x, goal.y, \"go\")\n\n for goal in route:\n plt.plot(goal.x, goal.y, \"rx\")\n plt.show()", "def astar(maze):\n # TODO: Write your code here\n start = maze.getStart()\n # p_queue = Queue.PriorityQueue()\n p_queue = []\n dim = maze.getDimensions()\n rows = dim[0]\n cols = dim[1]\n # backtrace.\n visited = {} \n lookup_table = {}\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n # heuristic, cost, prev\n lookup_table[(i, j)] = (-1, -1, (-1, -1))\n end = maze.getObjectives()[0]\n path = []\n # add startpoint to the queue.\n start_heuristic = 0 + abs(start[0] - end[0]) + abs(start[1] - end[1])\n # format: heuristic, current point so we can better sort. \n p_queue.append((start_heuristic, start))\n lookup_table[start] = (start_heuristic, 0, (-2, -2))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if pair[1] == end:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pair[1][0], pair[1][1])\n for i in list_of_neighbors:\n # if i is part of path, skip i.\n if visited.get(i) != (-1, -1):\n cost = lookup_table.get(pair[1])[1] + 1\n heuristic = cost + abs(i[0] - end[0]) + abs(i[1] - end[1])\n old_heuristic = lookup_table[i][0]\n if cost < lookup_table.get(i)[1]:\n lookup_table[i] = (heuristic, cost, pair[1])\n #remove node from explored set and move it to frontier.\n visited[i] = (-1,-1)\n bisect.insort(p_queue, (heuristic, i))\n else:\n continue\n # if i is in the queue, we may check whether the new path is better.\n if (lookup_table.get(i)[0], i) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n heuristic = cost + abs(i[0] - end[0]) + abs(i[1] - end[1])\n old_heuristic = lookup_table[i][0]\n if cost < lookup_table.get(i)[1]:\n lookup_table[i] = (heuristic, cost, pair[1])\n # remove item by value and insert it again to the p_queue. 
\n p_queue.remove((old_heuristic, i))\n bisect.insort(p_queue, (heuristic, i))\n # if the point is not in the open_list, then we can add it to the open_list and the look_up table.\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n heuristic = cost + abs(i[0] - end[0]) + abs(i[1] - end[1])\n lookup_table[i] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, i))\n # We are done!!!\n pt = end\n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def solve_maze(self):\r\n # if there is no maze to solve, cut the method\r\n if not self.generated:\r\n return None\r\n\r\n # initialize with empty path at starting cell\r\n self.path = dict()\r\n current = self.start\r\n\r\n # loop until the ending cell is reached\r\n while True:\r\n while True:\r\n # choose valid direction\r\n # must remain in the grid\r\n # also must not cross a wall\r\n dirNum = random.randint(0,3)\r\n adjacent = self.get_next_cell(current,dirNum,1)\r\n if self.is_valid_direction(current,dirNum):\r\n hasWall = (self.grid[adjacent[0]][adjacent[1]] == 0)\r\n if not hasWall:\r\n break\r\n # add cell and direction to path\r\n self.path[current] = dirNum\r\n\r\n # get next cell\r\n current = self.get_next_cell(current,dirNum,2)\r\n if current == self.end: \r\n break # break if ending cell is reached\r\n\r\n # go to start of path\r\n current = self.start\r\n self.solution.append(current)\r\n # loop until end of path is reached\r\n while not (current == self.end):\r\n dirNum = self.path[current] # get direction\r\n # add adjacent and crossed cells to solution\r\n crossed = self.get_next_cell(current,dirNum,1)\r\n current = self.get_next_cell(current,dirNum,2)\r\n self.solution.append(crossed)\r\n self.solution.append(current)\r\n\r\n self.path = dict()", "def findRoute(self, returnNonSelection=False):\n \n # pick the start and end GPS points # TODO: sort GPS Points first\n start_point = self.gps_points[0]\n end_point = self.gps_points[-1]\n \n start_node = self.getNearestNode(start_point)\n end_node = self.getNearestNode(end_point)\n \n # the start and endnodes returnes by the index are not in the graph, \n # therefore we need to look them up ....\n \n start_node = self.node_counter__node.get(start_node.getAttributes().get(\"nodecounter\"))\n end_node = self.node_counter__node.get(end_node.getAttributes().get(\"nodecounter\"))\n \n self.routfinder = RouteFinder(self.G)\n label_list = self.routefinder.findroutes(start_node, end_node)\n\n label_scores = []\n \n \n \n # let us loop through the label list \n for label in label_list:\n number_of_points = 0\n # we sum up the number of points and relate them to the length of the route\n print label\n \n for edge in label.getEdges():\n\n edge_id = edge.getAttributes().get(self.shapeFileUniqueId)\n number_of_points = number_of_points + self.edge_id__count.get(edge_id, 0)\n print \" \", number_of_points\n #we add the scores to a dict\n \n if number_of_points > 1:\n label_scores.append((label, number_of_points/label.getLength()))\n \n # print label_scores\n \n # and extract the maximum score\n score = 0\n selected = None\n \n for ls in label_scores:\n if ls[1] > score:\n selected = ls[0]\n score = ls[1]\n \n if returnNonSelection:\n pass\n else:\n return selected", "def a_star(self, mapdata, start, goal):\n\n print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not 
walkable goal\")\n return[]\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n frontier.put(next, priority)\n came_from[next] = current\n\n \n return[]", "def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")" ]
[ "0.69775563", "0.6936342", "0.67015743", "0.66100115", "0.65149343", "0.6455855", "0.645364", "0.64318365", "0.638702", "0.6368889", "0.629313", "0.6167785", "0.6155954", "0.6152433", "0.61210215", "0.6116109", "0.61007947", "0.60975003", "0.60924315", "0.6082624", "0.60740525", "0.6064072", "0.60601115", "0.6024515", "0.6024014", "0.6023423", "0.6017623", "0.6005633", "0.600557", "0.6001532" ]
0.7546996
0
Part of the A* algorithm. Sets the parent of the node and calculates the g, h and f functions
def attach_and_eval(node, parent, goal):
    node.set_parent(parent)
    node.g = parent.g + node.get_arc_cost()
    node.heuristic(goal)
    node.f = node.g + node.h
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setF(self):\n if self.parent: self.f = self.setG(self.parent.g) + self.setH()\n else: self.f = self.setG() + self.setH()\n return self.f", "def f(self,node):\r\n return (self.a*self.nodeDegree(node))/(1+self.b*self.nodeDegree(node))", "def __init__(self, parent, data, g, h, f):\r\n self.parent = parent\r\n self.data = data\r\n self.g = g\r\n self.h = h\r\n self.f = f", "def __init__(self, state, goal, parent=None):\n self.state = state\n self.neighbors = []\n self.visited_right = False\n self.visited_left = False\n self.parent_right = None\n self.parent_left = None\n self.parent = parent\n self.g = 0 if not parent else parent.g + 1\n self.h = self.get_h(goal)\n self.f = self.g + self.h", "def __init__(self, board, action, cost, parent):\n self.board = board\n self.action = action\n self.cost = cost\n self.parent = parent\n self.estimate = cost + board.h() # A* \"f\" function", "def f(self):\n return self.g() + self.h()", "def fG(self):\n pass", "def h(self,node):\n return 0", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def fA(self):\n pass", "def main(G): \n try:\n val_map = {'A': 1.0,\n 'D': 0.5714285714285714,\n 'H': 0.0}\n values = [val_map.get(node, 0.45) for node in G.nodes()]\n edge_colors = 'k'\n \n edge_labels=dict([((u,v,),d['weight'])\n for u,v,d in G.edges(data=True)])\n pos=nx.spring_layout(G) # positions for all nodes \n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)\n nx.draw(G,pos, node_color = values, node_size=15,edge_color=edge_colors,edge_cmap=plt.cm.Reds)\n pylab.show()\n\n for ite in range(len(G.nodes())):\n \n Iterations = ite \n SL = SIG.Single_linkage(G, Iterations)\n pos=nx.spring_layout(G) # positions for all nodes\n node_colors = ['b','g','r','y','c','k','m','w']\n for i in range(len(G)):\n node_colors.append('w')\n \n # nodes\n C_list = SL.fit_predict(G)[-1,:]\n for Clust in range(C_list.shape[1]):\n nx.draw_networkx_nodes(G,pos,\n nodelist = list(C_list[0,Clust]),\n node_color=node_colors[Clust],\n node_size=80,\n alpha=0.8)\n \n # edges\n nx.draw_networkx_edges(G,pos,width=1.0,alpha=0.5)\n nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)\n \n plt.axis('off')\n plt.savefig(\"labels_and_colors.png\") # save as png\n plt.show() # display\n print \"in level :\",ite \n print SL.__str__()\n\n\n except SIG.Single_linkage_Error:\n \n print( \"Got an imput error, please change the input and try it again.\" )", "def __init__(self, parent = None, label=''):\n self.parent = parent\n self.children = None\n self.data = None\n self.label = label\n self.dist = None\n self.sequence = None # The sequence after an alignment have been mapped (leaf) or the most parsimonous sequence (ancestral)\n self.seqscores = None # The scores propagated from leaves via children\n self.backptr = None # Pointers back to children: what symbol rendered current/parent symbols", "def add_parent_attributes(self):\n if len(self.parent_attributes) == 0:\n return\n dest = self.parent.attributes\n source = self.parent_attributes\n changes = {}\n self.merge_attribute_defs(dest, source, changes)\n for aid, value in changes.iteritems():\n# self.parent.h5node.attrs[aid] = value\n # may need modifying for MATLAB\n #- if self.path not in self.file.file_pointer:\n if self.file.get_node(self.path, abort=False) is None:\n # create parent node since it does not exist\n print \"trying to set parent attributes on non-registered parent node:\"\n print 
\"Non-registered parent node is: '%s'\", self.path\n traceback.print_stack()\n sys.exit(1)\n #- self.file.file_pointer[self.path].attrs[aid] = value\n self.file.set_attribute(self.path, aid, value)", "def __init__(self, coords, goal_coords, current_path_length) :\r\n\r\n self.coords = coords #The coordinates of the node\r\n\r\n #Calculating the g(n) value of node\r\n self.calculate_gn_value(current_path_length)\r\n\r\n #Calculating the h(n) value of node\r\n self.calculate_hn_value(goal_coords)\r\n\r\n #Calculating f(n) value of node\r\n self.calculate_fn_value()", "def parent_connect(self, node):\n if self.parent.get() >= self.data:\n self.parent.set_left(node)\n if node and node.left is not None:\n node.set_parent(self.parent)\n else:\n self.parent.set_right(node)\n if node and node.left is not None:\n node.set_parent(self.parent)", "def propagate_path_improvements(parent):\n for child in parent.children:\n if parent.g + 1 < child.g:\n child.set_parent(parent)\n child.g = parent.g + child.get_arc_cost()\n child.f = child.g + child.h\n # Recursive call to propagate possible path improvements to all children of the children\n propagate_path_improvements(child)", "def __call__(self, node_A):\r\n new_node = Op.__call__(self)\r\n new_node.inputs = [node_A]\r\n new_node.name = \"Sqrt(%s)\" % (node_A.name)\r\n return new_node", "def agg_func(config,parent_child_node):\n parent_child_node = list(parent_child_node)\n parent_geocode = parent_child_node[0] \n # a list of the node objects\n nodes = list(list(parent_child_node)[1])\n \n #calculate the length of each of the geocodes (to determine which is the parent)\n geocode_lens = [len(node.geocode) for node in nodes]\n #the parent is the shortest geocode\n parent = nodes[np.argmin(geocode_lens)]\n \n #subset the children nodes\n children = nodes[:np.argmin(geocode_lens)] + nodes[np.argmin(geocode_lens)+1:]\n children = sorted(children, key=lambda geocode_data: int(geocode_data.geocode))\n child_geos = [child.geocode for child in children]\n \n parent.backup_solve = children[0].parent_backup_solve\n syn_agg = sparse.multiSparse(np.zeros(parent.syn.shape))\n \n for child in children:\n syn_agg = syn_agg + child.syn\n parent.syn = syn_agg\n \n return parent", "def graph(self):\n ...", "def apply(self, fgraph):\r\n pass", "def parent(self, v):\n # method here", "def parent(self, A, i):\n if i == 0:\n return None\n div = float(i)/2\n if div.is_integer():\n div -= 1\n return int(div)", "def parent(self, A, i):\n if i == 0:\n return None\n div = float(i)/2\n if div.is_integer():\n div -= 1\n return int(div)", "def __init__(self,F,isMax,findZ):\r\n nx.DiGraph.__init__(self)\r\n tree = create_clique_tree(F.g)\r\n d_tree = tree.to_directed()\r\n self.add_nodes_from( d_tree.nodes(data=True) )\r\n self.add_edges_from( d_tree.edges(data=True) )\r\n self.nop = 0\r\n \r\n self.compute_clique_potentials(F)\r\n self.calibrate(isMax,findZ)", "def init_parent_lookup(parent_lookup, x, y):\n if x not in parent_lookup:\n parent_lookup[x] = x\n # single person family has himself as parent\n # maxCircle function decides\n if y not in parent_lookup:\n parent_lookup[y] = y", "def set_parent_of(self, parentof, expiration, timestamp):\n return self \\\n .V(parentof.parent_vid) \\\n .is_asset() \\\n .as_('parent_v') \\\n .V(parentof.child_vid) \\\n .is_asset() \\\n .coalesce(\n # The edge exists.\n __.inE('parent_of').filter(\n __.outV().id().is_(parentof.parent_vid))\n .choose(\n __.values('first_seen').is_(P.gt(timestamp)),\n __.property('first_seen', timestamp),\n 
__.identity(),\n )\n .choose(\n __.values('last_seen').is_(P.lt(timestamp)),\n __.property('last_seen', timestamp) \\\n .property('expiration', expiration),\n __.identity(),\n )\n .project('edge', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(True)),\n # The edge does not exist.\n __.addE('parent_of').from_('parent_v')\n .property(T.id, str(uuid.uuid4()))\n .property('first_seen', timestamp)\n .property('last_seen', timestamp)\n .property('expiration', expiration)\n .project('edge', 'exists')\n .by(__.identity().elementMap())\n .by(__.constant(False)),\n )", "def __init__(self, data, parent):\n self.left = None\n self.right = None\n self.data = data\n self.parent = parent", "def apply(self, f):\n if self.is_empty():\n return 0\n else:\n self.get_root().value = f(self.get_root().value)\n if self.get_left():\n self.get_left().apply(f)\n if self.get_right():\n self.get_right().apply(f)", "def __init__(self, state, parent, action, path_cost):\n self.state = state\n self.parent = parent\n self.action = action\n self.path_cost = path_cost\n self.depth = 0\n if parent:\n self.depth = parent.depth + 1", "def __init__(self, x, y):\n\n self.x = x\n self.y = y\n self.path_x = []\n self.path_y = []\n self.parent = None\n self.cost = 0.0" ]
[ "0.6539606", "0.5932769", "0.5919824", "0.56225765", "0.5440231", "0.5285597", "0.52696234", "0.5260468", "0.5191788", "0.51808476", "0.5132827", "0.51117384", "0.5083697", "0.50612867", "0.5060935", "0.5059894", "0.50431925", "0.5036736", "0.5033938", "0.5027082", "0.50177044", "0.50152355", "0.50152355", "0.5003413", "0.49984202", "0.49827302", "0.49692386", "0.4964392", "0.49605262", "0.49485296" ]
0.63525325
1
Creates a Trainer object.
training_sampler: Sampler that samples training dataset.
validation_sampler: Sampler that samples validation dataset. Can be None.
executor: Graph executor to run the network.
optimizer: The optimizer to use for training.
network_output: The node name of the network prediction (value or classification) for accuracy computation. Can be None.
def __init__(self, training_sampler: Sampler, validation_sampler: Optional[Sampler], executor: GraphExecutor, optimizer: Optimizer, network_output: Optional[str] = None):
    self.train_set = training_sampler
    self.test_set = validation_sampler
    self.executor = executor
    self.optimizer = optimizer
    self.network_output = network_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(parser):\n cli_args = add_all_args(parser, TRAINING)\n if not cli_args.train_tfrecord and not cli_args.valid_tfrecord:\n assert (\n cli_args.relative_labels or cli_args.xml_labels_folder\n ), 'No labels provided: specify --relative-labels or --xml-labels-folder'\n if cli_args.augmentation_preset:\n assert (\n preset := cli_args.augmentation_preset\n ) in AUGMENTATION_PRESETS, f'Invalid augmentation preset {preset}'\n trainer = Trainer(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n classes_file=cli_args.classes,\n train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n image_folder=cli_args.image_folder,\n )\n trainer.train(\n epochs=cli_args.epochs,\n batch_size=cli_args.batch_size,\n learning_rate=cli_args.learning_rate,\n new_dataset_conf={\n 'dataset_name': (d_name := cli_args.dataset_name),\n 'relative_labels': cli_args.relative_labels,\n 'test_size': cli_args.test_size,\n 'voc_conf': cli_args.voc_conf,\n 'augmentation': bool((preset := cli_args.augmentation_preset)),\n 'sequences': AUGMENTATION_PRESETS.get(preset),\n 'aug_workers': cli_args.workers,\n 'aug_batch_size': cli_args.process_batch_size,\n },\n dataset_name=d_name,\n weights=cli_args.weights,\n evaluate=cli_args.evaluate,\n merge_evaluation=cli_args.merge_evaluation,\n evaluation_workers=cli_args.workers,\n shuffle_buffer=cli_args.shuffle_buffer,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n plot_stats=cli_args.plot_stats,\n save_figs=cli_args.save_figs,\n clear_outputs=cli_args.clear_output,\n n_epoch_eval=cli_args.n_eval,\n )", "def train(self):\n train_dataloader = self.get_train_dataloader()\n\n if self.args.max_steps > 0:\n t_total = self.args.max_steps\n num_train_epochs = (\n self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1\n )\n else:\n t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)\n num_train_epochs = self.args.num_train_epochs\n\n lr_scheduler = orttrainer.optim.LinearWarmupLRScheduler(t_total, self.args.warmup_steps / float(t_total))\n\n loss_scaler = amp.DynamicLossScaler() if self.args.fp16 else None\n device = self.args.device.type\n\n device = f\"{device}:{self.args.device.index}\" if self.args.device.index else f\"{device}:0\"\n options = orttrainer.ORTTrainerOptions(\n {\n \"batch\": {\"gradient_accumulation_steps\": self.args.gradient_accumulation_steps},\n \"device\": {\"id\": device},\n \"mixed_precision\": {\"enabled\": self.args.fp16, \"loss_scaler\": loss_scaler},\n \"debug\": {\n \"deterministic_compute\": True,\n },\n \"utils\": {\"grad_norm_clip\": False},\n \"distributed\": {\n # we are running single node multi gpu test. 
thus world_rank = local_rank\n # and world_size = self.args.n_gpu\n \"world_rank\": max(0, self.args.local_rank),\n \"world_size\": int(self.world_size),\n \"local_rank\": max(0, self.args.local_rank),\n \"allreduce_post_accumulation\": True,\n },\n \"lr_scheduler\": lr_scheduler,\n }\n )\n\n param_optimizer = list(self.model.named_parameters())\n params = [\n {\n \"params\": [n for n, p in param_optimizer if \"bias\" in n or \"LayerNorm.weight\" in n],\n \"weight_decay_mode\": 1,\n },\n {\n \"params\": [n for n, p in param_optimizer if not (\"bias\" in n or \"LayerNorm.weight\" in n)],\n \"weight_decay_mode\": 1,\n },\n ]\n\n optim_config = optim.AdamConfig(params=params, lr=2e-5, do_bias_correction=True)\n self.model = orttrainer.ORTTrainer(self.model, self.model_desc, optim_config, options=options)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataloader.dataset))\n logger.info(\" Num Epochs = %d\", num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", self.args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n self.args.train_batch_size\n * self.args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", self.args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n tr_loss = 0.0\n logging_loss = 0.0\n train_iterator = trange(\n epochs_trained,\n int(num_train_epochs),\n desc=\"Epoch\",\n disable=self.args.local_rank not in [-1, 0],\n )\n\n for _epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=self.args.local_rank not in [-1, 0])\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n tr_loss += self._training_step(self.model, inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n len(epoch_iterator) <= self.args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator)\n ):\n global_step += 1\n\n if self.args.local_rank in [-1, 0]:\n if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (\n global_step == 1 and self.args.logging_first_step\n ):\n logs = {}\n if self.args.evaluate_during_training:\n results = self.evaluate()\n for key, value in results.items():\n eval_key = f\"eval_{key}\"\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / self.args.logging_steps\n\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n epoch_iterator.write(json.dumps({**logs, **{\"step\": global_step}}))\n\n if self.args.max_steps > 0 and global_step > self.args.max_steps:\n epoch_iterator.close()\n break\n if self.args.max_steps > 0 and global_step > self.args.max_steps:\n train_iterator.close()\n break\n\n logger.info(\"\\n\\nTraining completed. 
\\n\\n\")\n return TrainOutput(global_step, tr_loss / global_step)", "def __init__(self, encoded_network, input_shape, n_classes, batch_size=256,\n log_path=\"./trainer\", variable_scope=\"custom\"):\n super(DefaultNASTrainer, self).__init__(\n encoded_network=encoded_network,\n input_shape=input_shape,\n n_classes=n_classes,\n batch_size=batch_size,\n log_path=log_path,\n variable_scope=variable_scope\n )\n self._set_estimator()", "def make_keras_like(trainer, evaluator, validation_loader):\n training_history = {'accuracy': [], 'loss': []}\n validation_history = {'accuracy': [], 'loss': []}\n last_epoch = []\n\n RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'loss')\n RunningAverage(Accuracy(output_transform=lambda x: (x[1], x[2]))).attach(trainer, 'accuracy')\n\n prog_bar = ProgressBar()\n prog_bar.attach(trainer, ['loss', 'accuracy'])\n\n prog_bar_vd = ProgressBar()\n prog_bar_vd.attach(evaluator)\n from ignite.handlers import Timer\n\n timer = Timer(average=True)\n timer.attach(trainer, start=Events.EPOCH_STARTED,\n resume=Events.EPOCH_STARTED,\n pause=Events.EPOCH_COMPLETED,\n step=Events.EPOCH_COMPLETED)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(trainer):\n metrics = trainer.state.metrics\n accuracy = metrics['accuracy'] * 100\n loss = metrics['nll']\n last_epoch.append(0)\n training_history['accuracy'].append(accuracy)\n training_history['loss'].append(loss)\n train_msg = \"Train Epoch {}: acc: {:.2f}% loss: \".format(trainer.state.epoch, accuracy) + \\\n \"{:.2f}, train time: {:.2f}s\".format(loss, timer.value())\n\n evaluator.run(validation_loader)\n metrics = evaluator.state.metrics\n accuracy = metrics['accuracy'] * 100\n loss = metrics['nll']\n validation_history['accuracy'].append(accuracy)\n validation_history['loss'].append(loss)\n val_msg = \"Valid Epoch {}: acc: {:.2f}% loss: {:.2f}\".format(trainer.state.epoch, accuracy, loss)\n\n prog_bar_vd.log_message(train_msg + \" --- \" + val_msg)", "def __init__(\n self,\n loss_fn,\n device: torch.device,\n metrics_map: MetricsMapType = None,\n epochs: int = 5,\n batch_size: int = 64,\n reporting_interval: int = 1,\n shuffle: bool = True,\n num_workers: int = 0,\n ):\n if loss_fn is None:\n raise ValueError(\"FATAL ERROR: Trainer() -> 'loss_fn' cannot be None\")\n if device is None:\n raise ValueError(\"FATAL ERROR: Trainer() -> 'device' cannot be None\")\n if epochs < 1:\n raise ValueError(\"FATAL ERROR: Trainer() -> 'epochs' >= 1\")\n # batch_size can be -ve\n batch_size = -1 if batch_size < 0 else batch_size\n reporting_interval = 1 if reporting_interval < 1 else reporting_interval\n assert num_workers >= 0, \"FATAL ERROR: Trainer() -> 'num_workers' must be >= 0\"\n\n self.loss_fn = loss_fn\n self.device = device\n self.metrics_map = metrics_map\n self.epochs = epochs\n self.batch_size = batch_size\n self.reporting_interval = reporting_interval\n self.shuffle = shuffle\n self.num_workers = num_workers", "def main():\n model = th.nn.Linear(1, 1)\n optim = th.optim.Adam(model.parameters(), lr=1e-3)\n\n def loss_fn(model, data):\n x, target = data\n output = model(x)\n loss = th.mean(th.square(output - target))\n return loss\n\n def get_loader():\n while True:\n # NOTE(ycho): `32` here is a dummy fictitious batch size.\n x = th.empty((32, 1), dtype=th.float32)\n y = th.empty((32, 1), dtype=th.float32)\n yield (x, y)\n\n trainer = Trainer(\n Trainer.Settings(train_steps=1),\n model,\n optim,\n loss_fn,\n Hub(),\n get_loader())\n\n trainer.train()", "def __init__(self, train0_loader=None, 
train1_loader=None, train2_loader=None, val_loader=None, ae_epoch=40, train_epoch=40, ae_lr=0.0005, classify_lr=0.0005, writer=None):\n\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n self.net0 = CAE()\n self.net1 = CAE()\n self.net2 = CAE()\n\n self.net0 = self.net0.to(self.device)\n self.net1 = self.net1.to(self.device)\n self.net2 = self.net2.to(self.device)\n\n self.ae_epoch = ae_epoch\n self.train_epoch = train_epoch\n\n # Optimizer\n self.ae0_optimizer = optim.AdamW(itertools.chain(self.net0.encoder.parameters(), self.net0.decoder.parameters()), lr=ae_lr, betas=(0.9, 0.999))\n self.ae1_optimizer = optim.AdamW(itertools.chain(self.net1.encoder.parameters(), self.net1.decoder.parameters()), lr=ae_lr, betas=(0.9, 0.999))\n self.ae2_optimizer = optim.AdamW(itertools.chain(self.net2.encoder.parameters(), self.net2.decoder.parameters()), lr=ae_lr, betas=(0.9, 0.999))\n self.optimizer0 = optim.AdamW(itertools.chain(self.net0.encoder.parameters(), self.net0.classifier.parameters()), lr=classify_lr, betas=(0.9, 0.999))\n self.optimizer1 = optim.AdamW(itertools.chain(self.net1.encoder.parameters(), self.net1.classifier.parameters()), lr=classify_lr, betas=(0.9, 0.999))\n self.optimizer2 = optim.AdamW(itertools.chain(self.net2.encoder.parameters(), self.net2.classifier.parameters()), lr=classify_lr, betas=(0.9, 0.999))\n\n # Loss Function\n self.criterion = nn.CrossEntropyLoss()\n self.ae_criterion = nn.MSELoss()\n\n # Data\n self.train0_loader = train0_loader\n self.train1_loader = train1_loader\n self.train2_loader = train2_loader\n self.val_loader = val_loader\n\n self.writer = writer", "def train( # type: ignore\n self,\n pl_trainer_args: Dict[str, Any],\n model_args: Dict[str, Union[float, str, int]],\n dataset_args: Dict[str, Union[float, str, int]],\n dataset: GFlowNetDataset,\n environment: GraphBuildingEnv,\n context: GraphBuildingEnvContext,\n task: GFlowNetTask,\n ) -> None:\n\n logger.info(f\"Trainer arguments: {pl_trainer_args}\")\n\n if pl_trainer_args[\n \"resume_from_checkpoint\"\n ] is not None and not pl_trainer_args[\"resume_from_checkpoint\"].endswith(\n \".ckpt\"\n ):\n pl_trainer_args[\"resume_from_checkpoint\"] = None\n\n pl_trainer_args[\"callbacks\"] = {\n \"model_checkpoint_callback\": {\"save_top_k\": pl_trainer_args[\"save_top_k\"]}\n }\n\n pl_trainer_args[\"callbacks\"] = self.add_callbacks(pl_trainer_args[\"callbacks\"])\n\n pl_trainer_args[\"logger\"] = TensorBoardLogger(\n pl_trainer_args[\"save_dir\"], name=pl_trainer_args[\"basename\"]\n )\n\n trainer = Trainer(\n profiler=pl_trainer_args[\"profiler\"],\n logger=pl_trainer_args[\"logger\"],\n log_every_n_steps=pl_trainer_args[\"trainer_log_every_n_steps\"],\n callbacks=pl_trainer_args[\"callbacks\"],\n max_epochs=pl_trainer_args[\"epochs\"],\n strategy=pl_trainer_args[\"strategy\"],\n fast_dev_run=pl_trainer_args[\"development_mode\"],\n )\n\n data_module, model_module = self.get_data_and_model_modules(\n model_args,\n dataset_args,\n pl_trainer_args,\n dataset,\n environment,\n context,\n task,\n )\n trainer.fit(model_module, data_module)", "def create_trainer(session_name, # type: AnyStr\n save_dir, # type: AnyStr\n config, # type: thelper.typedefs.ConfigDict\n model, # type: thelper.typedefs.ModelType\n task, # type: thelper.tasks.Task\n loaders, # type: thelper.typedefs.MultiLoaderType\n ckptdata=None # type: Optional[thelper.typedefs.CheckpointContentType]\n ): # type: (...) 
-> thelper.train.Trainer\n assert \"trainer\" in config and config[\"trainer\"], \"session configuration dictionary missing 'trainer' section\"\n trainer_config = config[\"trainer\"]\n if \"type\" not in trainer_config:\n if isinstance(task, thelper.tasks.Classification):\n trainer_type = thelper.train.ImageClassifTrainer\n elif isinstance(task, thelper.tasks.Detection):\n trainer_type = thelper.train.ObjDetectTrainer\n elif isinstance(task, thelper.tasks.Regression):\n trainer_type = thelper.train.RegressionTrainer\n elif isinstance(task, thelper.tasks.Segmentation):\n trainer_type = thelper.train.ImageSegmTrainer\n else:\n raise AssertionError(f\"unknown trainer type required for task '{str(task)}'\")\n else:\n trainer_type = thelper.utils.import_class(trainer_config[\"type\"])\n return trainer_type(session_name, save_dir, model, task, loaders, config, ckptdata=ckptdata)", "def build_trainer(\n name: str,\n *,\n default_config: Optional[TrainerConfigDict] = None,\n validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,\n default_policy: Optional[Type[Policy]] = None,\n get_policy_class: Optional[Callable[[TrainerConfigDict], Optional[Type[\n Policy]]]] = None,\n validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,\n before_init: Optional[Callable[[Trainer], None]] = None,\n after_init: Optional[Callable[[Trainer], None]] = None,\n before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,\n mixins: Optional[List[type]] = None,\n execution_plan: Optional[Callable[[\n WorkerSet, TrainerConfigDict\n ], Iterable[ResultDict]]] = default_execution_plan) -> Type[Trainer]:\n\n original_kwargs = locals().copy()\n base = add_mixins(Trainer, mixins)\n\n class trainer_cls(base):\n _name = name\n _default_config = default_config or COMMON_CONFIG\n _policy_class = default_policy\n\n def __init__(self, config=None, env=None, logger_creator=None):\n Trainer.__init__(self, config, env, logger_creator)\n\n def _init(self, config: TrainerConfigDict,\n env_creator: Callable[[EnvConfigDict], EnvType]):\n # Validate config via custom validation function.\n if validate_config:\n validate_config(config)\n\n # No `get_policy_class` function.\n if get_policy_class is None:\n # Default_policy must be provided (unless in multi-agent mode,\n # where each policy can have its own default policy class.\n if not config[\"multiagent\"][\"policies\"]:\n assert default_policy is not None\n self._policy_class = default_policy\n # Query the function for a class to use.\n else:\n self._policy_class = get_policy_class(config)\n # If None returned, use default policy (must be provided).\n if self._policy_class is None:\n assert default_policy is not None\n self._policy_class = default_policy\n\n if before_init:\n before_init(self)\n\n # Creating all workers (excluding evaluation workers).\n self.workers = self._make_workers(\n env_creator=env_creator,\n validate_env=validate_env,\n policy_class=self._policy_class,\n config=config,\n num_workers=self.config[\"num_workers\"])\n self.execution_plan = execution_plan\n self.train_exec_impl = execution_plan(self.workers, config)\n\n if after_init:\n after_init(self)\n\n @override(Trainer)\n def step(self):\n res = next(self.train_exec_impl)\n return res\n\n @override(Trainer)\n def _before_evaluate(self):\n if before_evaluate_fn:\n before_evaluate_fn(self)\n\n @override(Trainer)\n def __getstate__(self):\n state = Trainer.__getstate__(self)\n state[\"train_exec_impl\"] = (\n self.train_exec_impl.shared_metrics.get().save())\n return state\n\n 
@override(Trainer)\n def __setstate__(self, state):\n Trainer.__setstate__(self, state)\n self.train_exec_impl.shared_metrics.get().restore(\n state[\"train_exec_impl\"])\n\n @staticmethod\n @override(Trainer)\n def with_updates(**overrides) -> Type[Trainer]:\n \"\"\"Build a copy of this trainer class with the specified overrides.\n\n Keyword Args:\n overrides (dict): use this to override any of the arguments\n originally passed to build_trainer() for this policy.\n\n Returns:\n Type[Trainer]: A the Trainer sub-class using `original_kwargs`\n and `overrides`.\n\n Examples:\n >>> MyClass = SomeOtherClass.with_updates({\"name\": \"Mine\"})\n >>> issubclass(MyClass, SomeOtherClass)\n ... False\n >>> issubclass(MyClass, Trainer)\n ... True\n \"\"\"\n return build_trainer(**dict(original_kwargs, **overrides))\n\n trainer_cls.__name__ = name\n trainer_cls.__qualname__ = name\n return trainer_cls", "def trainer_fn(hparams, schema):\n # Number of nodes in the first layer of the DNN\n first_dnn_layer_size = 100\n num_dnn_layers = 4\n dnn_decay_factor = 0.7\n\n train_batch_size = 40\n eval_batch_size = 40\n\n tf_transform_output = tft.TFTransformOutput(hparams.transform_output)\n\n train_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda\n hparams.train_files,\n tf_transform_output,\n batch_size=train_batch_size)\n\n eval_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda\n hparams.eval_files,\n tf_transform_output,\n batch_size=eval_batch_size)\n\n train_spec = tf.estimator.TrainSpec( # pylint: disable=g-long-lambda\n train_input_fn,\n max_steps=hparams.train_steps)\n\n serving_receiver_fn = lambda: _example_serving_receiver_fn( # pylint: disable=g-long-lambda\n tf_transform_output, schema)\n\n exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)\n eval_spec = tf.estimator.EvalSpec(\n eval_input_fn,\n steps=hparams.eval_steps,\n exporters=[exporter],\n name='chicago-taxi-eval')\n\n run_config = tf.estimator.RunConfig(\n save_checkpoints_steps=999, keep_checkpoint_max=1)\n\n run_config = run_config.replace(model_dir=hparams.serving_model_dir)\n\n estimator = _build_estimator(\n # Construct layers sizes with exponetial decay\n hidden_units=[\n max(2, int(first_dnn_layer_size * dnn_decay_factor**i))\n for i in range(num_dnn_layers)\n ],\n config=run_config,\n warm_start_from=hparams.warm_start_from)\n\n # Create an input receiver for TFMA processing\n receiver_fn = lambda: _eval_input_receiver_fn( # pylint: disable=g-long-lambda\n tf_transform_output, schema)\n\n return {\n 'estimator': estimator,\n 'train_spec': train_spec,\n 'eval_spec': eval_spec,\n 'eval_input_receiver_fn': receiver_fn\n }", "def train_epoch(self, epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard):\n\n # Train an epoch\n self.model.train()\n print('Start epoch', epoch)\n train_itr = iter(self.loader_train)\n total_err = 0\n total_acc = 0\n\n for index, (data_pixel, data_labels) in enumerate(train_itr):\n\n # compute\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n\n # Use the model the produce the classification\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n\n # produce evaluator results\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n\n # set optimizer to zero.\n optimizer.zero_grad()\n\n # back propogate the evaluation results.\n eval_result['loss'].backward()\n\n # optimizer take step forward.\n optimizer.step()\n\n # tabulate the steps from the 
evaluation\n eval_result = {k: eval_result[k].item() for k in eval_result}\n\n # update every hundreds' of\n if index % 100 == 0:\n print(index, eval_result['loss'], eval_result['acc'])\n train_result = evaluator.evalulate_on_cache()\n train_total_err = train_result['loss']\n writer_tensorboard.add_scalar('Loss/Train', train_total_err, global_step=epoch)\n # log_metric('loss', train_total_err)\n train_total_acc = train_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Train', train_total_acc, global_step=epoch)\n # log_metric('acc', train_total_acc)\n train_kaggle_score = train_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Train', train_kaggle_score, global_step=epoch)\n # log_metric('kaggle_score', train_kaggle_score)\n dict_metrics_train = {\n 'Loss/Train': train_total_err,\n 'Accuracy/Train': train_total_acc,\n 'Kaggle_Score/Train': train_kaggle_score,\n }\n log_metrics(dict_metrics_train, step=epoch)\n print(f\"Epoch {epoch} Training, Loss {train_total_err}, Acc {train_total_acc}\")\n evaluator.clear_cache()\n # compute validation error\n self.model.eval()\n val_itr = iter(self.loader_val)\n with torch.no_grad():\n for index, (data_pixel, data_labels) in enumerate(val_itr):\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n eval_result = {k: eval_result[k].item() for k in eval_result}\n total_err += eval_result['loss']\n total_acc += eval_result['acc']\n # print(total_err / (1 + input_index), total_acc / (1 + input_index))\n val_result = evaluator.evalulate_on_cache()\n val_total_err = val_result['loss']\n writer_tensorboard.add_scalar('Loss/Val', val_total_err, global_step=epoch)\n val_total_acc = val_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Val', val_total_acc, global_step=epoch)\n val_kaggle_score = val_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Val', val_kaggle_score, global_step=epoch)\n dict_metrics_val = {\n 'Loss/Validation': val_total_err,\n 'Accuracy/Validation': val_total_acc,\n 'Kaggle_Score/Validation': val_kaggle_score,\n }\n log_metrics(dict_metrics_val, step=epoch)\n # Write to disk.\n writer_tensorboard.flush()\n print(f\"Epoch {epoch} Eval, Loss {val_total_err}, Acc {val_total_acc}\")\n evaluator.clear_cache()\n print(\"Saving the model (epoch %d)\" % epoch)\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, state_fpath)\n print(f\"Making a backup (step {epoch})\")\n backup_fpath = os.path.join(self.backup_dir, f\"model_bak_{epoch}.pt\")\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, backup_fpath)\n # Dump the traces\n perf_trace.append(\n {\n 'epoch': epoch,\n 'train_err': train_total_err,\n 'train_acc': train_total_acc,\n 'train_kaggle_score': train_kaggle_score,\n 'val_err': val_total_err,\n 'val_acc': val_total_acc,\n 'val_kaggle_score': val_kaggle_score\n }\n )\n pickle.dump(perf_trace, open(perf_path, 'wb'))\n # store epoch full result separately\n epoch_result = {\n 'epoch': epoch,\n 'train_result': train_result,\n 'val_result': val_result\n }\n pickle.dump(epoch_result, open(os.path.join(self.results_dir, 'result_epoch_{0}.p'.format(epoch)), 'wb'))", "def create_trainer(config: Config, device: torch.device, split: Split,\n own_split: 
bool) -> Tuple[Any, ...]:\n ret_type = Tuple[Tensor, Tensor, float]\n\n def output_transform(x: Tensor, y: Tensor,\n y_pred: Tensor, loss: Tensor) -> ret_type:\n \"\"\"What trainer returns to metrics at each step\"\"\"\n return y_pred, y, loss.item()\n\n model, optimizer_fn, criterion, checkpoint = config.model_config\n model = model.to(device)\n optimizer = optimizer_fn(model.parameters())\n if optimizer.__class__ in update_functions:\n update_function = update_functions[optimizer.__class__]\n update = update_function(model, optimizer, criterion, device,\n output_transform, prepare_batch)\n trainer = Engine(update)\n else:\n trainer = create_supervised_trainer(model, optimizer, criterion, device,\n prepare_batch=prepare_batch,\n output_transform=output_transform)\n if checkpoint is not None:\n info(f'Resume from {checkpoint}')\n obj = torch.load(str(checkpoint))\n model.load_state_dict(obj['model'])\n optimizer.load_state_dict(obj['optimizer'])\n trainer.load_state_dict(obj['trainer'])\n if not own_split:\n split = Split.load_state_dict(obj['split'])\n return model, optimizer, criterion, split, trainer", "def build_trainer(restore_state=None, train_policies=None, config=None):\n \n print(\"Using config\")\n print(config)\n cls = PPOTrainer\n trainer = cls(config=config)\n env = trainer.workers.local_worker().env\n if restore_state is not None:\n trainer.restore_from_object(restore_state)\n return trainer", "def __init__(self, iaa, lr: float = 0.01, optimizer: str = \"sgd\",\n loss_weights: list = None, cmd_line=None,\n validation: bool = False,\n loss_criterion: str = 'mce') -> None:\n self.validation = validation\n self.network = None\n self.iaa = iaa\n\n if loss_weights is None:\n # To avoid mutable default values\n loss_weights = [0.005, 0.3000, 0.695]\n # Now normalize loss weights to account for 3x multiplication of\n # penalized weights\n # This was removed because I think it may have caused nan errors\n # loss_weights = [weight / 4 for weight in loss_weights]\n\n if torch.cuda.is_available():\n # Check if cuda is available and use it if it is\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n warnings.warn(\"CUDA compatible GPU not found. Running on CPU\",\n ResourceWarning)\n\n # Parameters\n self.lr = lr\n self.optimizer = optimizer\n\n # Set the loss criterion according to the recommended for pixel-wise\n # classification. We use weights so that missing curbs\n # will be more heavily penalized\n loss_weights = torch.tensor(loss_weights,\n dtype=torch.float).to(device=self.device)\n\n if loss_criterion == 'mce':\n self.criterion = MCELoss(weight_normal=loss_weights,\n weight_penalized=3 * loss_weights)\n if loss_criterion == 'ce':\n self.criterion = CrossEntropyLoss(weight=loss_weights)\n\n # Create UI\n if cmd_line:\n self.ui = TrainingCmd(cmd_line)\n else:\n self.ui = TrainingGUI()\n\n self.cmd_line = cmd_line\n\n # Set the status file path variable\n self.status_file_path = None\n\n # Creates logging tracker\n self.tracker = PlotCSV()", "def __call__(self, inputs, training):\n\n\t\treturn self._build_network(inputs, training)", "def main(_) -> None:\n params = train_utils.parse_configuration(FLAGS)\n mode = FLAGS.mode\n model_dir = FLAGS.model_dir\n if 'train' in FLAGS.mode:\n # Pure eval modes do not output yaml files. 
Otherwise continuous eval job\n # may race against the train job for writing the same file.\n train_utils.serialize_config(params, model_dir)\n\n if FLAGS.seed is not None:\n logging.info('Setting tf seed.')\n tf.random.set_seed(FLAGS.seed)\n\n task = RankingTask(\n params=params.task,\n optimizer_config=params.trainer.optimizer_config,\n logging_dir=model_dir,\n steps_per_execution=params.trainer.steps_per_loop,\n name='RankingTask')\n\n enable_tensorboard = params.trainer.callbacks.enable_tensorboard\n\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=params.runtime.distribution_strategy,\n all_reduce_alg=params.runtime.all_reduce_alg,\n num_gpus=params.runtime.num_gpus,\n tpu_address=params.runtime.tpu)\n\n with strategy.scope():\n model = task.build_model()\n\n def get_dataset_fn(params):\n return lambda input_context: task.build_inputs(params, input_context)\n\n train_dataset = None\n if 'train' in mode:\n train_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.train_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n validation_dataset = None\n if 'eval' in mode:\n validation_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.validation_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n if params.trainer.use_orbit:\n with strategy.scope():\n checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter(\n params, model_dir)\n trainer = RankingTrainer(\n config=params,\n task=task,\n model=model,\n optimizer=model.optimizer,\n train='train' in mode,\n evaluate='eval' in mode,\n train_dataset=train_dataset,\n validation_dataset=validation_dataset,\n checkpoint_exporter=checkpoint_exporter)\n\n train_lib.run_experiment(\n distribution_strategy=strategy,\n task=task,\n mode=mode,\n params=params,\n model_dir=model_dir,\n trainer=trainer)\n\n else: # Compile/fit\n checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)\n\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=model.optimizer.iterations,\n checkpoint_interval=params.trainer.checkpoint_interval)\n checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)\n\n time_callback = keras_utils.TimeHistory(\n params.task.train_data.global_batch_size,\n params.trainer.time_history.log_steps,\n logdir=model_dir if enable_tensorboard else None)\n callbacks = [checkpoint_callback, time_callback]\n\n if enable_tensorboard:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=model_dir,\n update_freq=min(1000, params.trainer.validation_interval),\n profile_batch=FLAGS.profile_steps)\n callbacks.append(tensorboard_callback)\n\n num_epochs = (params.trainer.train_steps //\n params.trainer.validation_interval)\n current_step = model.optimizer.iterations.numpy()\n initial_epoch = current_step // params.trainer.validation_interval\n\n eval_steps = params.trainer.validation_steps if 'eval' in mode else None\n\n if mode in ['train', 'train_and_eval']:\n logging.info('Training started')\n history = model.fit(\n train_dataset,\n initial_epoch=initial_epoch,\n epochs=num_epochs,\n steps_per_epoch=params.trainer.validation_interval,\n validation_data=validation_dataset,\n 
validation_steps=eval_steps,\n callbacks=callbacks,\n )\n model.summary()\n logging.info('Train history: %s', history.history)\n elif mode == 'eval':\n logging.info('Evaluation started')\n validation_output = model.evaluate(validation_dataset, steps=eval_steps)\n logging.info('Evaluation output: %s', validation_output)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n 
torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def __init__(self, net, data_set, num_epochs, **kwargs): \n self.net = net\n self.data_set = data_set\n self.num_epochs = num_epochs\n \n # Get hyper-parameters, set defaults\n learning_rate = kwargs.get('learning_rate', 0.001)\n self.batch_size = kwargs.get('batch_size', 128)\n\n # Use cross-entropy loss since we're doing multi-class classification\n self.criterion = kwargs.get('criterion', nn.CrossEntropyLoss())\n\n # Use Adam optimizer, because it's faster than classic gradient descent.\n adam_betas = kwargs.get('adam_betas', (0.9, 0.999))\n self.optimizer = optim.Adam(net.parameters(), lr=learning_rate, betas=adam_betas)\n \n # Construct data loader\n shuffle = kwargs.get('shuffle', True)\n sampler = kwargs.get('sampler', None)\n self.data_loader = DataLoader(\n data_set,\n batch_size=self.batch_size,\n shuffle=shuffle,\n sampler=sampler,\n num_workers=3\n )", "def build_task_trainer(unfixed_params):\n logdir = unfixed_params['logdir']\n gpu_ids_abs = unfixed_params[\"gpu_ids_abs\"]\n depth = unfixed_params[\"depth\"]\n lr = unfixed_params[\"lr\"]\n\n batch_size = 32\n opt_name = \"RMSprop\"\n lr_decay = 0.94\n decay_position= 1\n position_type = \"epoch\"\n weight_decay = 2e-5\n momentum = 0\n nepochs = 100\n num_class = 10\n torch.backends.cudnn.benchmark = True\n mnist = FashionMNIST(root=\"datasets/fashion_data\", batch_size=batch_size, num_workers=2)\n net = Model(SimpleModel(depth), gpu_ids_abs=gpu_ids_abs, init_method=\"kaiming\", verbose=False)\n opt = Optimizer(net.parameters(), opt_name, lr_decay, decay_position, position_type=position_type,\n lr=lr, weight_decay=weight_decay, momentum=momentum)\n Trainer = FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt, mnist, num_class)\n return Trainer", "def create_training(self, weight_spacing = 1.0, weight_bending = 1.0, weight_head_tail = 1.0):\n self.skeleton = tf.placeholder(\"float32\", [1, self.skeleton_size, 2]);\n #self.skeleton_valid = tf.placeholder(\"int32\", [1]);\n #self.head_tail = tf.placeholder(\"float32\", [1, self.head_tail_size, 2]);\n #self.head_tail_valid = tf.placeholder(\"int32\", [1]);\n \n #self.cost = self.create_cost(self.output, self.skeleton, self.skeleton_valid, self.head_tail, self.head_tail_valid, \n # weight_spacing = weight_spacing, weight_bending = weight_bending); #, weight_head_tail = weight_head_tail);\n self.cost = self.create_cost(self.output, self.skeleton, weight_spacing = weight_spacing, weight_bending = weight_bending); \n \n #trainer\n self.trainer = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-6).minimize(self.cost)", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n 
print(f\"INFO: Starting training ... \\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. Can't train the model\\n\")", "def train_network(self, data_x, data_y, optimizer_generator, num_epochs, batch_size, out_name, validation_split = 0.05, shuffle_data = True, loss=\"mse\", metrics=None, center_out=True): \n training_process = multiprocessing.Process(target = self.train_network_process, args = (data_x, data_y, optimizer_generator, num_epochs, batch_size, out_name, validation_split, shuffle_data, loss, metrics, center_out))\n training_process.start()\n history = training_process.join()\n \n # Set model source to out_name so that next operation will run on the new model\n self.model_source = out_name\n \n return history", "def train(settings=None):\n if not settings:\n settings = Settings()\n train_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),\n transforms.RandomHorizontalFlip(),\n transforms.NegativeOneToOneNormalizeImage(),\n transforms.NumpyArraysToTorchTensors()])\n validation_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),\n transforms.NegativeOneToOneNormalizeImage(),\n transforms.NumpyArraysToTorchTensors()])\n\n train_dataset = CrowdDatasetWithUnlabeled(settings.train_dataset_path, 'train', transform=train_transform)\n train_dataset_loader = torch.utils.data.DataLoader(train_dataset, batch_size=settings.batch_size, shuffle=True,\n num_workers=settings.number_of_data_loader_workers)\n validation_dataset = CrowdDataset(settings.validation_dataset_path, 'validation', transform=validation_transform)\n validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=settings.batch_size,\n shuffle=False,\n num_workers=settings.number_of_data_loader_workers)\n\n gan = GAN()\n gpu(gan)\n D = gan.D\n G = gan.G\n discriminator_optimizer = Adam(D.parameters())\n generator_optimizer = Adam(G.parameters())\n\n step = 0\n epoch = 0\n\n if settings.load_model_path:\n d_model_state_dict, d_optimizer_state_dict, epoch, step = load_trainer(prefix='discriminator',\n settings=settings)\n D.load_state_dict(d_model_state_dict)\n discriminator_optimizer.load_state_dict(d_optimizer_state_dict)\n discriminator_optimizer.param_groups[0].update({'lr': settings.learning_rate, 'weight_decay': settings.weight_decay})\n if settings.load_model_path:\n g_model_state_dict, g_optimizer_state_dict, _, _ = load_trainer(prefix='generator',\n settings=settings)\n G.load_state_dict(g_model_state_dict)\n generator_optimizer.load_state_dict(g_optimizer_state_dict)\n generator_optimizer.param_groups[0].update({'lr': settings.learning_rate})\n\n running_scalars = defaultdict(float)\n validation_running_scalars = defaultdict(float)\n running_example_count = 0\n datetime_string = datetime.datetime.now().strftime(\"y%Ym%md%dh%Hm%Ms%S\")\n trial_directory = os.path.join(settings.log_directory, settings.trial_name + ' ' + datetime_string)\n os.makedirs(trial_directory, exist_ok=True)\n summary_writer = SummaryWriter(os.path.join(trial_directory, 'train'))\n validation_summary_writer = SummaryWriter(os.path.join(trial_directory, 'validation'))\n print('Starting training...')\n step_time_start = 
datetime.datetime.now()\n while epoch < settings.number_of_epochs:\n for examples, unlabeled_examples in train_dataset_loader:\n unlabeled_images = unlabeled_examples[0]\n # Real image discriminator processing.\n discriminator_optimizer.zero_grad()\n images, labels, _ = examples\n images, labels = Variable(gpu(images)), Variable(gpu(labels))\n current_batch_size = images.data.shape[0]\n predicted_labels, predicted_counts = D(images)\n real_feature_layer = D.feature_layer\n density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()\n count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()\n loss = count_loss + (density_loss * 10)\n loss.backward()\n running_scalars['Labeled/Loss'] += loss.data[0]\n running_scalars['Labeled/Count Loss'] += count_loss.data[0]\n running_scalars['Labeled/Density Loss'] += density_loss.data[0]\n running_scalars['Labeled/Count ME'] += (predicted_counts - labels.sum(1).sum(1)).mean().data[0]\n # Unlabeled.\n _ = D(gpu(images))\n labeled_feature_layer = D.feature_layer\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n unlabeled_loss = feature_distance_loss(unlabeled_feature_layer, labeled_feature_layer,\n scale=False) * settings.unlabeled_loss_multiplier\n unlabeled_loss.backward()\n # Fake.\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n z = torch.from_numpy(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]).rvs(\n size=[current_batch_size, 100]).astype(np.float32))\n # z = torch.randn(settings.batch_size, noise_size)\n fake_examples = G(gpu(Variable(z)))\n _ = D(fake_examples.detach())\n fake_feature_layer = D.feature_layer\n fake_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer,\n order=1).neg() * settings.fake_loss_multiplier\n fake_loss.backward()\n # Feature norm loss.\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n feature_norm_loss = (unlabeled_feature_layer.norm(dim=1).mean() - 1).pow(2)\n feature_norm_loss.backward()\n # Gradient penalty.\n if settings.gradient_penalty_on:\n alpha = gpu(Variable(torch.rand(2)))\n alpha = alpha / alpha.sum(0)\n interpolates = (alpha[0] * gpu(Variable(unlabeled_images, requires_grad=True)) +\n alpha[1] * gpu(Variable(fake_examples.detach().data, requires_grad=True)))\n _ = D(interpolates)\n interpolates_predictions = D.feature_layer\n gradients = torch.autograd.grad(outputs=interpolates_predictions, inputs=interpolates,\n grad_outputs=gpu(torch.ones(interpolates_predictions.size())),\n create_graph=True, only_inputs=True)[0]\n gradient_penalty = ((gradients.norm(dim=1) - 1) ** 2).mean() * settings.gradient_penalty_multiplier\n gradient_penalty.backward()\n # Discriminator update.\n discriminator_optimizer.step()\n # Generator.\n if step % 1 == 0:\n generator_optimizer.zero_grad()\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer.detach()\n z = torch.randn(current_batch_size, 100)\n fake_examples = G(gpu(Variable(z)))\n _ = D(fake_examples)\n fake_feature_layer = D.feature_layer\n generator_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer)\n generator_loss.backward()\n generator_optimizer.step()\n\n running_example_count += images.size()[0]\n if step % settings.summary_step_period == 0 and step != 0:\n comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),\n cpu(predicted_labels))\n 
summary_writer.add_image('Comparison', comparison_image, global_step=step)\n fake_images_image = torchvision.utils.make_grid(fake_examples.data[:9], nrow=3)\n summary_writer.add_image('Fake', fake_images_image, global_step=step)\n print('\\rStep {}, {}...'.format(step, datetime.datetime.now() - step_time_start), end='')\n step_time_start = datetime.datetime.now()\n for name, running_scalar in running_scalars.items():\n mean_scalar = running_scalar / running_example_count\n summary_writer.add_scalar(name, mean_scalar, global_step=step)\n running_scalars[name] = 0\n running_example_count = 0\n for validation_examples in validation_dataset_loader:\n images, labels, _ = validation_examples\n images, labels = Variable(gpu(images)), Variable(gpu(labels))\n predicted_labels, predicted_counts = D(images)\n density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()\n count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()\n count_mae = torch.abs(predicted_counts - labels.sum(1).sum(1)).mean()\n count_me = (predicted_counts - labels.sum(1).sum(1)).mean()\n validation_running_scalars['Labeled/Density Loss'] += density_loss.data[0]\n validation_running_scalars['Labeled/Count Loss'] += count_loss.data[0]\n validation_running_scalars['Test/Count MAE'] += count_mae.data[0]\n validation_running_scalars['Labeled/Count ME'] += count_me.data[0]\n comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),\n cpu(predicted_labels))\n validation_summary_writer.add_image('Comparison', comparison_image, global_step=step)\n for name, running_scalar in validation_running_scalars.items():\n mean_scalar = running_scalar / len(validation_dataset)\n validation_summary_writer.add_scalar(name, mean_scalar, global_step=step)\n validation_running_scalars[name] = 0\n step += 1\n epoch += 1\n if epoch != 0 and epoch % settings.save_epoch_period == 0:\n save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')\n save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')\n save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')\n save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')\n print('Finished Training')\n return trial_directory", "def __init__(self, network, model_prior, priors, simulators, n_obs, loss, summary_stats=None, optimizer=None,\n learning_rate=0.0005, checkpoint_path=None, max_to_keep=5, clip_method='global_norm', clip_value=None):\n\n assert len(priors) == len(simulators), 'The number of priors should equal the number of simulators.'\n\n # Basic attributes\n self.network = network\n self.model_prior = model_prior\n self.priors = priors\n self.simulators = simulators\n self.n_obs = n_obs\n self.loss = loss\n self.summary_stats = summary_stats\n self.n_models = len(priors)\n self.clip_method = clip_method\n self.clip_value = clip_value\n \n # Optimizer settings\n if optimizer is None:\n if tf.__version__.startswith('1'):\n self.optimizer = tf.train.AdamOptimizer(learning_rate)\n else:\n self.optimizer = Adam(learning_rate)\n else:\n self.optimizer = optimizer(learning_rate)\n\n # Checkpoint settings\n if checkpoint_path is not None:\n self.checkpoint = Checkpoint(optimizer=self.optimizer, model=self.network)\n self.manager = CheckpointManager(self.checkpoint, checkpoint_path, max_to_keep=max_to_keep)\n self.checkpoint.restore(self.manager.latest_checkpoint)\n if 
self.manager.latest_checkpoint:\n print(\"Networks loaded from {}\".format(self.manager.latest_checkpoint))\n else:\n print(\"Initializing networks from scratch.\")\n else:\n self.checkpoint = None\n self.manager = None\n self.checkpoint_path = checkpoint_path\n\n # TODO - make sure forward inference goes through", "def runner_decrator(cls):\n\n def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n def custom_test_with_TTA(cls, cfg, model):\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n cls.build_evaluator = classmethod(custom_build_evaluator)\n cls.test_with_TTA = classmethod(custom_test_with_TTA)\n\n return cls", "def build_graph(\n loss_module_fn,\n learner_fn,\n trainer_class,\n np_global_step=None, # pylint: disable=unused-argument\n):\n local_device, remote_device, index_remote_device =\\\n device_utils.get_local_remote_device_fn(FLAGS.ps_tasks)\n with tf.device(local_device):\n with tf.device(remote_device):\n global_step = tf.train.get_or_create_global_step()\n\n loss_module = loss_module_fn()\n\n learner, theta_mod = learner_fn(\n loss_module=loss_module,\n remote_device=remote_device,\n )\n\n trainer = trainer_class(\n local_device=local_device,\n remote_device=remote_device,\n index_remote_device=index_remote_device,\n learner=learner,\n )\n\n truncated_trainer_endpoints = trainer.build_endpoints()\n\n trainable_theta_vars = theta_mod.get_variables(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n\n logging.info(\"GOT %d trainable variables\", len(trainable_theta_vars))\n\n # The following is a sort of accounting of variables. It ensures variables\n # are where you think they are, one is not creating extras.\n # Variable management in distributed setting has caused great\n # issues in the past.\n # While verbose, this seems to mitigate a lot of it by being very explicit\n # and throwing errors.\n local_vars = list(learner.get_variables(tf.GraphKeys.GLOBAL_VARIABLES))\n local_vars += list(\n learner.loss_module.get_variables(tf.GraphKeys.GLOBAL_VARIABLES))\n\n local_vars += trainer.get_local_variables()\n\n saved_remote_vars = list(\n theta_mod.get_variables(tf.GraphKeys.GLOBAL_VARIABLES))\n saved_remote_vars += [global_step] + trainer.get_saved_remote_variables()\n\n not_saved_remote_vars = trainer.get_not_saved_remote_variables()\n # TODO(lmetz) remove this line. 
For now, the meta_opt places variables in\n # the wrong scopes\n saved_remote_vars = list(set(saved_remote_vars))\n\n all_remote_vars = saved_remote_vars + not_saved_remote_vars\n\n logging.info(\"Remote Saved Variables\")\n for v in saved_remote_vars:\n logging.info(\" %s\\t\\t %s \\t %s\", v.shape.as_list(), v.device,\n v.op.name)\n\n logging.info(\"Remote Not Saved Variables\")\n for v in not_saved_remote_vars:\n logging.info(\" %s\\t\\t %s \\t %s\", v.shape.as_list(), v.device,\n v.op.name)\n\n logging.info(\"Local Variables\")\n for v in local_vars:\n logging.info(\" %s\\t\\t %s \\t %s\", v.shape.as_list(), v.device,\n v.op.name)\n\n logging.info(\"Trainable Theta Variables\")\n for v in theta_mod.get_variables(tf.GraphKeys.TRAINABLE_VARIABLES):\n logging.info(\" %s\\t\\t %s \\t %s\", v.shape.as_list(), v.device,\n v.op.name)\n\n device_utils.check_variables_accounting(local_vars, all_remote_vars)\n device_utils.check_variables_are_local(local_vars)\n device_utils.check_variables_are_remote(all_remote_vars)\n\n chief_summary_op = tf.summary.merge([\n tf.summary.scalar(\"global_step\", global_step),\n ])\n\n # Ops to run when a parameter server is reset.\n ps_was_reset_ops = [tf.initialize_variables(not_saved_remote_vars)]\n ps_was_reset_ops.append(trainer.ps_was_reset_op())\n ps_was_reset_op = tf.group(*ps_was_reset_ops, name=\"ps_was_reset_op\")\n\n if FLAGS.ps_tasks == 0:\n chief_device = \"\"\n else:\n chief_device = \"/job:chief\"\n with tf.device(chief_device):\n chief_is_ready = tf.get_variable(\n name=\"chief_is_ready\", initializer=tf.constant(False))\n set_chief_is_ready = chief_is_ready.assign(True)\n\n # this dictionary is result of / merged with trainer.\n return dict(\n global_step=global_step,\n chief_summary_op=chief_summary_op,\n saved_remote_vars=saved_remote_vars,\n remote_vars=all_remote_vars,\n local_vars=local_vars,\n chief_is_ready=chief_is_ready,\n set_chief_is_ready=set_chief_is_ready,\n trainer_trainer_ops=truncated_trainer_endpoints,\n ps_was_reset_op=ps_was_reset_op,\n )", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=300,num_hidden_units_2=200,num_code_units=50):\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(\"Tetrode number: {}, Num outputs: {}\".format(tetrode_number,dataset['output_dim']))\n\n print(dataset['input_shape'])\n print(dataset['output_dim'])\n \n print(\"Making the model...\")\n network = model(dataset['input_shape'],dataset['output_dim'],num_hidden_units,num_hidden_units_2,num_code_units,(4,1))\n print(\"Done!\")\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n accuracies = []\n trainvalidation = []\n\n print(\"Begining to train the network...\")\n epochsDone = 0\n autoencoderSameLabels = []\n try:\n for i in range(NUM_EPOCHS):\n costs = []\n valid_costs = []\n\n for start, end in zip(range(0, dataset['num_examples_train'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_train'], BATCH_SIZE)):\n cost = training['train'](dataset['X_train'][start:end],dataset['y_train'][start:end])\n costs.append(cost)\n \n for start, end in zip(range(0, dataset['num_examples_valid'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_valid'], BATCH_SIZE)):\n cost = training['valid'](dataset['X_valid'][start:end],dataset['y_valid'][start:end])\n valid_costs.append(cost)\n\n\n meanValidCost = np.mean(np.asarray(valid_costs),dtype=np.float32) \n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n accuracy = 
training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Accuracy: {}, Training cost / validation cost: {}\".format(i+1,accuracy,meanTrainCost/meanValidCost))\n\n if(np.isnan(meanTrainCost/meanValidCost)):\n print(\"Nan value\")\n break\n\n\n # this is the test to see if the autoencoder is learning how to \n if i%10==0:\n acs = []\n for j in range(dataset['caswells_dim']):\n # print(dataset['labeled_test'][j].shape)\n codes = training['code'](dataset['labeled_test'][j])\n np.mean(np.argmax(dataset['y_test'], axis=1) == np.argmax(training['predict'](dataset['X_test']), axis=1))\n format_codes = []\n for code in codes:\n # if(j==0):\n format_codes.append(np.argmax(code))\n\n prev = sorted(format_codes)[0]\n # print(sorted(format_codes))\n k = 0\n same = [1]\n for code in sorted(format_codes)[1:]:\n if(code == prev):\n same[k] = same[k] + 1\n else:\n k+=1\n same.append(1)\n prev = code\n\n same = np.asarray(same)\n # print(same,np.argmax(same),same[np.argmax(same)],np.sum(same))\n label_acc = same[np.argmax(same)]*1.0/np.sum(same)\n acs.append(label_acc)\n print(\"Label: {}, Num examples: {}, Same label with autoencoder: {} \".format(j,dataset['labeled_test'][j].shape[0],label_acc))\n acs = np.asarray(acs)\n autoencoderSameLabels.append(np.mean(acs))\n print(\"Average agreement: {}\".format(np.mean(acs)))\n\n\n if i%50 == 0:\n ran = randint(0,dataset['num_examples_test']-20)\n now = datetime.datetime.now()\n for j in range(10):\n testing = [dataset['X_test'][ran]]\n # print(testing[0].shape)\n output = dataset['y_test'][ran].reshape((1, 200))[0]\n print(output)\n\n # print(np.arange(dataset['output_dim']))\n # print(output)\n prediction = training['predict'](testing)[0].reshape((1, 200))[0]\n print(prediction)\n # print(prediction)\n # print(testing[0][0])\n \n code = training['code'](testing).reshape((1, 50))\n\n # print(code)\n \n # plotting the figure\n\n fig = plt.figure(1)\n sub1 = fig.add_subplot(311)\n sub2 = fig.add_subplot(312)\n sub3 = fig.add_subplot(313)\n\n # add titles\n\n sub1.set_title('Desired output')\n sub2.set_title('Net output')\n sub3.set_title('Code layer output')\n\n # adding x labels\n\n sub1.set_xlabel('Time')\n sub2.set_xlabel('Time')\n sub3.set_xlabel('Code label')\n\n # adding y labels\n\n sub1.set_ylabel('Amplitude')\n sub2.set_ylabel('Amplitude')\n sub3.set_ylabel('Probability')\n\n # Plotting data\n\n # print(testing[0][0])\n # inp = []\n # for z in range(4):\n # inp += list(testing[0][0][z])\n\n\n sub1.plot(output)\n # sub1.bar(x_axis, output, width=1)\n sub1.grid(True)\n\n sub2.plot(prediction)\n sub2.grid(True)\n\n x_axis = list(np.arange(len(code[0])))\n\n # sub3.plot(code[0])\n sub3.bar(x_axis, code[0], width=1)\n # plt.show()\n\n fig.tight_layout()\n\n # plt.plot(var2)\n # fig.tight_layout()\n plt.savefig('../logs/convAuto/fig{}_{}_{}.png'.format(i,j,now), bbox_inches='tight')\n plt.close()\n \n ran += 1\n # break\n\n\n trainvalidation.append([meanTrainCost,meanValidCost])\n accuracies.append(accuracy)\n if(EARLY_STOPPING):\n if(len(accuracies) < STOPPING_RANGE):\n pass\n else:\n test = [k for k in accuracies if k < accuracy]\n if not test:\n print('Early stopping causing training to finish at epoch {}'.format(i+1))\n break\n del accuracies[0]\n accuracies.append(accuracy)\n\n epochsDone = epochsDone + 1\n\n except KeyboardInterrupt:\n pass\n\n # plt.plot(trainvalidation)\n # plt.show()\n\n if(LOG_EXPERIMENT):\n print(\"Logging the experiment details...\")\n log = dict(\n NET_TYPE = \"Conv auto encoder 2 hidden 1 code\",\n 
TETRODE_NUMBER = tetrode_number,\n BASENAME = BASENAME,\n NUM_EPOCHS = epochsDone,\n BATCH_SIZE = BATCH_SIZE,\n TRAIN_VALIDATION = trainvalidation,\n LEARNING_RATE = LEARNING_RATE,\n MOMENTUM = MOMENTUM,\n SAME_LABEL_AVERAGES = autoencoderSameLabels,\n ACCURACY = accuracies,\n NETWORK_LAYERS = [str(type(layer)) for layer in lasagne.layers.get_all_layers(network)],\n OUTPUT_DIM = dataset['output_dim'],\n # NETWORK_PARAMS = lasagne.layers.get_all_params_values(network)\n )\n now = datetime.datetime.now()\n filename = \"experiments/convAuto/{}_{}_{}_NUMLAYERS_{}_OUTPUTDIM_{}\".format(now,NUM_EPOCHS,NUM_HIDDEN_UNITS,len(log['NETWORK_LAYERS']),log['OUTPUT_DIM'])\n filename = re.sub(\"[^A-Za-z0-9_/,-:]\", \"\", filename)\n with open(filename,\"w\") as outfile:\n outfile.write(str(log))", "def forward(opt):\n my_utils.plant_seeds(randomized_seed=opt.randomize)\n os.makedirs(opt.output_dir, exist_ok=True)\n\n trainer = t.Trainer(opt)\n trainer.build_dataset_train_for_matching()\n trainer.build_dataset_test_for_matching()\n trainer.build_network()\n trainer.build_losses()\n trainer.network.eval()\n\n if opt.eval_list and os.path.isfile(opt.eval_list):\n source_target_files = np.loadtxt(opt.eval_list, dtype=str)\n source_target_files = source_target_files.tolist()\n for i, st in enumerate(source_target_files):\n source, target = st\n cat1, fname1 = source.split('/')\n fname1 = os.path.splitext(fname1)[0]\n cat2, fname2 = target.split('/')\n fname2 = os.path.splitext(fname2)[0]\n if len(opt.shapenetv1_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv1_path, cat1, fname1, \"model.obj\"), os.path.join(opt.shapenetv1_path, cat2, fname2, \"model.obj\"))\n elif len(opt.shapenetv2_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv2_path, cat1, fname1, \"models\", \"model_normalized.obj\"), os.path.join(opt.shapenetv2_path, cat2, fname2, \"models\", \"model_normalized.obj\"))\n elif (opt.eval_source != \"\" and opt.eval_source[-4:] == \".txt\") and (opt.eval_target != \"\" and opt.eval_target[-4:] == \".txt\"):\n source_target_files = [(figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_source), figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_target))]\n\n rot_mat = get_3D_rot_matrix(1, np.pi/2)\n rot_mat_rev = get_3D_rot_matrix(1, -np.pi/2)\n isV2 = len(opt.shapenetv2_path) > 0\n for i, source_target in enumerate(source_target_files):\n basename = get_model_id(source_target[0], isV2) + \"-\" + get_model_id(source_target[1], isV2)\n path_deformed = os.path.join(opt.output_dir, basename + \"-Sab.ply\")\n path_source = os.path.join(opt.output_dir, basename + \"-Sa.ply\")\n path_target = os.path.join(opt.output_dir, basename +\"-Sb.ply\")\n\n mesh_path = source_target[0]\n print(mesh_path)\n source_mesh_edge = get_shapenet_model.link(mesh_path)\n\n mesh_path = source_target[1]\n target_mesh_edge = get_shapenet_model.link(mesh_path)\n\n\n print(\"Deforming source in target\")\n\n source = source_mesh_edge.vertices\n target = target_mesh_edge.vertices\n\n pymesh.save_mesh_raw(path_source, source, source_mesh_edge.faces, ascii=True)\n pymesh.save_mesh_raw(path_target, target, target_mesh_edge.faces, ascii=True)\n\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat)\n target = target.dot(rot_mat)\n\n source = torch.from_numpy(source).cuda().float().unsqueeze(0)\n target = torch.from_numpy(target).cuda().float().unsqueeze(0)\n\n with torch.no_grad():\n source, _, _, _, _ = loss.forward_chamfer(trainer.network, source, target, local_fix=None,\n 
distChamfer=trainer.distChamfer)\n\n try:\n source = source.squeeze().cpu().detach().numpy()\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat_rev)\n P2_P1_mesh = pymesh.form_mesh(vertices=source, faces=source_mesh_edge.faces)\n pymesh.save_mesh(path_deformed, P2_P1_mesh, ascii=True)\n\n # print(\"computing signal tranfer form source to target\")\n # high_frequencies.high_frequency_propagation(path_source, path_deformed, path_target)\n except Exception as e:\n print(e)\n import pdb; pdb.set_trace()\n path_deformed = path_deformed[:-4] + \".pts\"\n save_pts(path_deformed, source.squeeze().cpu().detach().numpy())", "def train(train_features, train_labels, val_features, val_labels, network, optimizer, loss, config, log_date, log_timestamp):\n\n # prints the number of learnable parameters in the network\n count_parameters(network)\n\n # init network using weight initialization of choice\n network = init_weights(network)\n # send network to GPU\n network.to(config['gpu'])\n network.train()\n\n # if weighted loss chosen, calculate weights based on training dataset; else each class is weighted equally\n if config['use_weights']:\n class_weights = class_weight.compute_class_weight('balanced', classes=np.unique(train_labels + 1), y=train_labels + 1)\n if config['loss'] == 'cross_entropy':\n loss.weights = class_weights\n print('Applied weighted class weights: ')\n print(class_weights)\n else:\n class_weights = class_weight.compute_class_weight(None, classes=np.unique(train_labels + 1), y=train_labels + 1)\n if config['loss'] == 'cross_entropy':\n loss.weights = class_weights\n\n\n # initialize optimizer and loss\n opt, criterion = optimizer, loss\n\n if config['loss'] == 'maxup':\n maxup = Maxup(myNoiseAdditionAugmenter, ntrials=4)\n\n # initialize training and validation dataset, define DataLoaders\n dataset = torch.utils.data.TensorDataset(torch.from_numpy(train_features), torch.from_numpy(train_labels))\n trainloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=True)\n dataset = torch.utils.data.TensorDataset(torch.from_numpy(val_features).float(), torch.from_numpy(val_labels))\n valloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=False)\n\n # counters and objects used for early stopping and learning rate adjustment\n best_loss = np.inf\n best_network = None\n best_val_losses = None\n best_train_losses = None\n best_val_preds = None\n best_train_preds = None\n early_stop = False\n lr_pt_counter = 0\n es_pt_counter = 0\n\n # training loop; iterates through epochs\n for e in range(config['epochs']):\n \"\"\"\n TRAINING\n \"\"\"\n # helper objects\n train_preds = []\n train_gt = []\n train_losses = []\n start_time = time.time()\n batch_num = 1\n\n # iterate over train dataset\n for i, (x, y) in enumerate(trainloader):\n # send x and y to GPU\n inputs, targets = x.to(config['gpu']), y.to(config['gpu'])\n # zero accumulated gradients\n opt.zero_grad()\n\n if config['loss'] == 'maxup':\n # Increase the inputs via data augmentation\n inputs, targets = maxup(inputs, targets)\n\n # send inputs through network to get predictions, calculate loss and backpropagate\n train_output = network(inputs)\n\n if config['loss'] == 'maxup':\n # calculates loss\n train_loss = maxup.maxup_loss(train_output, targets.long())[0]\n else:\n train_loss = criterion(train_output, targets.long())\n\n train_loss.backward()\n opt.step()\n # append train loss to list\n train_losses.append(train_loss.item())\n\n # create predictions and append them to final list\n y_preds = 
np.argmax(train_output.cpu().detach().numpy(), axis=-1)\n y_true = targets.cpu().numpy().flatten()\n train_preds = np.concatenate((np.array(train_preds, int), np.array(y_preds, int)))\n train_gt = np.concatenate((np.array(train_gt, int), np.array(y_true, int)))\n\n # if verbose print out batch wise results (batch number, loss and time)\n if config['verbose']:\n if batch_num % config['print_freq'] == 0 and batch_num > 0:\n cur_loss = np.mean(train_losses)\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d} batches | ms/batch {:5.2f} | '\n 'train loss {:5.2f}'.format(e, batch_num, elapsed * 1000 / config['batch_size'], cur_loss))\n start_time = time.time()\n batch_num += 1\n\n # plot gradient flow if wanted\n if config['save_gradient_plot']:\n plot_grad_flow(network)\n\n \"\"\"\n VALIDATION\n \"\"\"\n\n # helper objects\n val_preds = []\n val_gt = []\n val_losses = []\n\n # set network to eval mode\n network.eval()\n with torch.no_grad():\n # iterate over validation dataset\n for i, (x, y) in enumerate(valloader):\n # send x and y to GPU\n inputs, targets = x.to(config['gpu']), y.to(config['gpu'])\n\n if config['loss'] == 'maxup':\n # Increase the inputs via data augmentation\n inputs, targets = maxup(inputs, targets)\n\n # send inputs through network to get predictions, loss and calculate softmax probabilities\n val_output = network(inputs)\n if config['loss'] == 'maxup':\n # calculates loss\n val_loss = maxup.maxup_loss(val_output, targets.long())[0]\n else:\n val_loss = criterion(val_output, targets.long())\n\n val_output = torch.nn.functional.softmax(val_output, dim=1)\n\n # append validation loss to list\n val_losses.append(val_loss.item())\n\n # create predictions and append them to final list\n y_preds = np.argmax(val_output.cpu().numpy(), axis=-1)\n y_true = targets.cpu().numpy().flatten()\n val_preds = np.concatenate((np.array(val_preds, int), np.array(y_preds, int)))\n val_gt = np.concatenate((np.array(val_gt, int), np.array(y_true, int)))\n\n # print epoch evaluation results for train and validation dataset\n print(\"EPOCH: {}/{}\".format(e + 1, config['epochs']),\n \"Train Loss: {:.4f}\".format(np.mean(train_losses)),\n \"Train Acc: {:.4f}\".format(jaccard_score(train_gt, train_preds, average='macro')),\n \"Train Prec: {:.4f}\".format(precision_score(train_gt, train_preds, average='macro')),\n \"Train Rcll: {:.4f}\".format(recall_score(train_gt, train_preds, average='macro')),\n \"Train F1: {:.4f}\".format(f1_score(train_gt, train_preds, average='macro')),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)),\n \"Val Acc: {:.4f}\".format(jaccard_score(val_gt, val_preds, average='macro')),\n \"Val Prec: {:.4f}\".format(precision_score(val_gt, val_preds, average='macro')),\n \"Val Rcll: {:.4f}\".format(recall_score(val_gt, val_preds, average='macro')),\n \"Val F1: {:.4f}\".format(f1_score(val_gt, val_preds, average='macro')))\n\n # if chosen, print the value counts of the predicted labels for train and validation dataset\n if config['print_counts']:\n y_train = np.bincount(train_preds)\n ii_train = np.nonzero(y_train)[0]\n y_val = np.bincount(val_preds)\n ii_val = np.nonzero(y_val)[0]\n print('Predicted Train Labels: ')\n print(np.vstack((ii_train, y_train[ii_train])).T)\n print('Predicted Val Labels: ')\n print(np.vstack((ii_val, y_val[ii_val])).T)\n\n # if adjust learning rate is enabled\n if config['adj_lr'] or config['early_stopping']:\n if best_loss < np.mean(val_losses):\n lr_pt_counter += 1\n es_pt_counter += 1\n\n # adjust learning rate check\n if 
lr_pt_counter >= config['adj_lr_patience'] and config['adj_lr']:\n config['lr'] *= 0.1\n for param_group in opt.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n print('Changing learning rate to {} since no loss improvement over {} epochs.'\n .format(config['lr'], str(lr_pt_counter)))\n\n # early stopping check\n if es_pt_counter >= config['es_patience'] and config['early_stopping']:\n print('Stopping training early since no loss improvement over {} epochs.'\n .format(str(es_pt_counter)))\n early_stop = True\n # print results of best epoch\n print('Final (best) results: ')\n print(\"Train Loss: {:.4f}\".format(np.mean(best_train_losses)),\n \"Train Acc: {:.4f}\".format(jaccard_score(train_gt, best_train_preds, average='macro')),\n \"Train Prec: {:.4f}\".format(precision_score(train_gt, best_train_preds, average='macro')),\n \"Train Rcll: {:.4f}\".format(recall_score(train_gt, best_train_preds, average='macro')),\n \"Train F1: {:.4f}\".format(f1_score(train_gt, best_train_preds, average='macro')),\n \"Val Loss: {:.4f}\".format(np.mean(best_val_losses)),\n \"Val Acc: {:.4f}\".format(jaccard_score(val_gt, best_val_preds, average='macro')),\n \"Val Prec: {:.4f}\".format(precision_score(val_gt, best_val_preds, average='macro')),\n \"Val Rcll: {:.4f}\".format(recall_score(val_gt, best_val_preds, average='macro')),\n \"Val F1: {:.4f}\".format(f1_score(val_gt, best_val_preds, average='macro')))\n\n else:\n lr_pt_counter = 0\n es_pt_counter = 0\n best_network = network\n best_loss = np.mean(val_losses)\n best_train_losses = train_losses\n best_train_preds = train_preds\n best_val_losses = val_losses\n best_val_preds = val_preds\n else:\n best_network = network\n best_train_losses = train_losses\n best_train_preds = train_preds\n best_val_losses = val_losses\n best_val_preds = val_preds\n\n # set network to train mode again\n network.train()\n\n if early_stop:\n break\n\n # if plot_gradient gradient plot is shown at end of training\n if config['save_gradient_plot']:\n mkdir_if_missing(os.path.join('logs', log_date, log_timestamp))\n plt.savefig(os.path.join('logs', log_date, log_timestamp, 'grad_flow.png'))\n\n # return validation, train and test predictions as numpy array with ground truth\n return best_network, np.vstack((best_val_preds, val_gt)).T, np.vstack((best_train_preds, train_gt)).T" ]
[ "0.6460203", "0.6075577", "0.6028676", "0.59938025", "0.58702856", "0.58700705", "0.5863667", "0.58252823", "0.57971066", "0.5793705", "0.5755463", "0.57455766", "0.5744249", "0.5733688", "0.56948066", "0.568793", "0.56833005", "0.56637686", "0.5651451", "0.5650193", "0.562782", "0.56082183", "0.5591325", "0.55793977", "0.55593365", "0.5559227", "0.55577874", "0.5540396", "0.5537266", "0.55328935" ]
0.7394186
0
Runs train and test set alternately for a given number of epochs. epochs: Number of epochs to run the loop for. events: A list of events to use in training/testing. Instances of RunnerEvent invoke the runner events; instances of OptimizerEvent and SamplerEvent are also invoked in the optimizer and sampler objects. collect_all_times: If True, the training statistics collect the latency of every optimizer and executor step. Returns the training statistics for all epochs.
def run_loop(self, epochs, events: List[TrainingEvent] = None, collect_all_times: bool = False) -> TrainingStatistics: # Create statistics object stats = TrainingStatistics(self.train_set.batch_size, (0 if self.test_set is None else self.test_set.batch_size)) # Set and distribute events if events is None: events = DefaultTrainerEvents(epochs) if collect_all_times: events.append(SummaryGeneratorInferenceEvent(stats)) else: events.append(SummaryGeneratorEvent(stats)) executor_events = [e for e in events if isinstance(e, ExecutorEvent)] optimizer_events = [e for e in events if isinstance(e, OptimizerEvent)] sampler_events = [e for e in events if isinstance(e, SamplerEvent)] events = [e for e in events if isinstance(e, RunnerEvent)] # Append events to executor and samplers self.executor.events.extend(executor_events) self.train_set.events.extend(sampler_events) if self.test_set is not None: self.test_set.events.extend(sampler_events) try: for event in events: event.before_training(self, stats) # Run test set prior to training self._test_accuracy(stats, events) for epoch in range(epochs): for event in events: event.before_epoch(epoch, self, stats) self._train(stats, events, optimizer_events) self._test_accuracy(stats, events) for event in events: event.after_epoch(epoch, self, stats) except (StopIteration, StopTraining): pass # If stopping was requested for event in events: event.after_training(self, stats) # Remove events from executor and samplers del self.executor.events[-len(executor_events):] del self.train_set.events[-len(sampler_events):] if self.test_set is not None: del self.test_set.events[-len(sampler_events):] return stats
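A minimal usage sketch for the run_loop method above. The enclosing class and its constructor (here called Trainer, taking an executor, a train_set sampler, and an optional test_set sampler) are not shown in the snippet, so those names and arguments are assumptions; it is also assumed that RunnerEvent provides no-op defaults for the hooks that are not overridden.

# Hypothetical setup -- Trainer construction is assumed, not shown above.
trainer = Trainer(executor=executor, train_set=train_sampler, test_set=test_sampler)

# Default events; collect_all_times=True also records per-step optimizer/executor latencies.
stats = trainer.run_loop(epochs=10, collect_all_times=True)

# Custom RunnerEvent: run_loop calls after_epoch(epoch, runner, stats) on it,
# while OptimizerEvent/SamplerEvent instances in the same list are forwarded
# to the optimizer and the samplers instead.
class PrintEpochEvent(RunnerEvent):  # assumes no-op defaults for the other hooks
    def after_epoch(self, epoch, runner, stats):
        print("finished epoch", epoch)

stats = trainer.run_loop(epochs=5, events=[PrintEpochEvent()])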
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def loop(self, epochs, train_loader, val_loader, test_loader):\n\n self.all_epoch = epochs\n self._resume()\n\n for ep in range(self.cur_epoch, epochs + 1):\n self.cur_epoch = ep\n\n # conduct training, validation and test\n self.train_loss = self.train(train_loader)\n if ep % self.val_freq == 0:\n self.val_loss = self.val(val_loader)\n\n if ep % self.test_freq == 0:\n self.test_loss, rho, nmse = self.test(test_loader)\n else:\n rho, nmse = None, None\n\n # conduct saving, visualization and log printing\n self._loop_postprocessing(rho, nmse)", "def epoch_train(tools, **kwargs):\n sess = tools.sess\n optimizer = tools.optimizer\n\n feed_dict = kwargs.get(\"feed_dict\", {})\n\n infos, summary, e, _ = sess.run(tools.infos, feed_dict=feed_dict)\n if config.VERBOSE_EACH:\n if not int(e) % config.VERBOSE_EACH:\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n else:\n print(config.INFOMESSAGE(infos))\n sys.stdout.flush()\n\n tools.reporter(summary, e)\n\n try:\n if not feed_dict:\n while True:\n sess.run(optimizer)\n else:\n while True:\n sess.run(optimizer, feed_dict=feed_dict)\n except tf.errors.OutOfRangeError:\n pass\n return infos", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)", "def run_training_and_tests(test_name, dataset, models, n_images = 1000, training_split = 0.7, \n n_training_images = None, n_test_images = None, \n n_iterations = 1, dimensions = (50, 50)):\n\n aggregate_metrics = {}\n\n # Run specified number of iterations\n for 
i in range(n_iterations):\n print(\"\\nTest iteration\", i+1)\n\n # Handle if specific training and test set size isn't given\n if (n_training_images is None):\n n_training_images = n_images * training_split\n if (n_test_images is None):\n n_test_images = n_images * (1-training_split)\n\n # Load training and test sets from single dataset\n train_data, train_labels, test_data, test_labels = image_utils.read_dataset(\n n_training_images, \n n_test_images, \n './datasets/' + dataset, \n dimensions[0], \n dimensions[1]\n )\n\n # Train and run tests for each model\n for model in models:\n print(\"Working with model '\" + model.label + \"'\")\n\n # Train model\n start = time.time()\n model.train(copy.deepcopy(train_data), train_labels)\n end = time.time()\n training_time = round(end - start, 3)\n\n # Run predictions on test set\n start = time.time()\n predicted = model.run(copy.deepcopy(test_data))\n end = time.time()\n test_time = round(end - start, 3)\n\n # Calculate metrics and store for aggregate calculations\n metrics = Metrics(test_labels, predicted, training_time, test_time)\n if model.label in aggregate_metrics:\n aggregate_metrics[model.label].append(metrics)\n else:\n aggregate_metrics[model.label] = [metrics]\n\n # Print results\n print(\"Results\\n\" + \"------\")\n print(str(metrics))\n\n # Save model\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/iteration\" + str(i+1) + \"/\"\n print(\"Saving model to '\" + filepath + model.label + \".joblib'\")\n os.makedirs(os.path.dirname(filepath), exist_ok = True)\n with open(filepath + model.label + '.joblib', 'wb') as file:\n dump(model, file)\n\n # Save results\n print(\"Saving results to '\" + filepath + \"results.txt'\\n\")\n with open(filepath + \"results.txt\", 'w') as file:\n file.write(str(metrics))\n\n # Calculate, print and write aggregate metrics\n print(\n 'Aggregate Results' + '\\n' +\n '-----------------'\n )\n for model in models:\n aggregate = combine_metrics(aggregate_metrics[model.label])\n print(model.label)\n print(aggregate)\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/\"\n print(\"Saving results to '\" + filepath + \"aggregate_results.txt'\" + \"\\n -- -\\n\")\n with open(filepath + \"aggregate_results.txt\", 'w') as file:\n file.write(str(aggregate))", "def train_models(self):\n\n #keep track on the number of iterations (needed to scale lambda)\n nr_iteration = 0\n \n for epoch in range(self.epochs):\n start = time.time()\n print()\n print(epoch + 1)\n print()\n for step, batch in enumerate(self.training_data):\n X_batch = normalize_images(tf.cast(batch[0], 'float32'))\n Y_batch = batch[1]\n Z_batch = self.ae_model.encode(X_batch)\n \n self.train_step_disc(Z_batch, Y_batch)\n # Call only one tf.function when tracing.\n #ADD LAMBDA SCHEDULE ACCORDING TO OUR EXPERIMENTS AND EPOCH LENGTH\n self.scale_lambda(self.lambda_e, nr_iteration)\n self.train_step_ae(X_batch, Y_batch, Z_batch)\n\n nr_iteration += 1\n end = time.time()\n print(\"Epoch \" + str(epoch + 1) + \" takes \" + str(end - start))", "def launch(self,\n train_data,\n validate_data,\n logger,\n epochs,\n start_epoch=0):\n\n for e in range(start_epoch, start_epoch + epochs):\n for name, loss in self.train(train_data).items():\n logger.record(name, loss, e)\n for name, loss in self.validate(validate_data).items():\n logger.record(name, loss, e)", "def launch(self,\n train_data,\n validate_data,\n logger,\n epochs,\n start_epoch=0):\n\n for e in range(start_epoch, start_epoch + epochs):\n for name, loss in 
self.train(train_data).items():\n logger.record(name, loss, e)\n for name, loss in self.validate(validate_data).items():\n logger.record(name, loss, e)", "def train(self, n_iterations, test_samples, test_labels, training_samples, training_labels):\n\n # Keep track of the running time for training the neural network\n start_time_network = time.time()\n\n # Train the neural network with the defined number of iterations on the training data batches\n all_training_loss = []\n all_test_loss = []\n for iteration in tqdm(range(n_iterations+1)):\n # The dataset create automatic batches, so there is no need to define the samples\n self.session.run(self.optimizer)\n # # Keep track of the training and test loss\n # training_loss = self.session.run(tf.reduce_mean(self.cost),\n # feed_dict={\"Input/BatchSamples:0\": training_samples,\n # \"Input/BatchLabels:0\": training_labels})\n # test_loss = self.session.run(tf.reduce_mean(self.cost), feed_dict={\"Input/BatchSamples:0\": test_samples,\n # \"Input/BatchLabels:0\": test_labels})\n # # Store the loss in percentages\n # all_training_loss.append(training_loss*100)\n # all_test_loss.append(test_loss*100)\n\n # Check for every 100th iteration the loss\n if iteration % 1000 == 0:\n training_cost = self.session.run(tf.reduce_mean(self.cost))\n print(\"STEP {} | Training cost: {:.4f}\".format(iteration, training_cost*100))\n test_accuracy = self.evaluate(evaluation_samples=test_samples, evaluation_labels=test_labels)\n print(\"\\t\\t Test accuracy: {:.2f}%\".format(test_accuracy[1]))\n\n # Get the total running time of the neural network\n network_run_time = time.time() - start_time_network\n\n return network_run_time, all_training_loss, all_test_loss", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def train_loop(sess: Session,\n loss_updates: Sequence[Tensor],\n fetch,\n train_generators: Sequence[Generator],\n test_generators: Sequence[Generator],\n loss_ratios: Sequence[int]=None,\n test_every=100,\n num_iterations=100000,\n callbacks=None,\n **kwargs) -> Session:\n #pdb.set_trace()\n # Default 1 for loss_ratios and normalize\n loss_ratios = [1 for i in range(len(loss_updates))] if loss_ratios is None else loss_ratios\n loss_ratios = loss_ratios / np.sum(loss_ratios)\n callbacks = [] if callbacks is None else callbacks\n\n # Prepare dict to be passed to callbacks\n callback_dict = {}\n callback_dict.update(kwargs)\n callback_dict.update({'sess': sess})\n state = {}\n\n # Main loop\n for i in range(num_iterations):\n # Generate input\n curr_fetch = {}\n curr_fetch.update(fetch)\n curr_fetch[\"update_loss\"] = np.random.choice(loss_updates, p=loss_ratios)\n feed_dict = gen_feed_dict(train_generators)\n fetch_res = sess.run(curr_fetch, feed_dict=feed_dict)\n\n # Evaluate on test data every test_every iterations\n if 
test_generators is not None and (i % test_every == 0 or i == num_iterations - 1):\n test_feed_dict = gen_feed_dict(test_generators, True)\n test_fetch_res = sess.run(fetch, feed_dict=test_feed_dict)\n fetch_res['test_fetch_res'] = test_fetch_res\n if 'loss' in test_fetch_res:\n print(\"Test Loss\", test_fetch_res['loss'])\n if 'losses' in test_fetch_res:\n print(\"Test Losses\", test_fetch_res['losses'])\n\n # Do all call backs\n for cb in callbacks:\n cb(fetch_res, feed_dict, i, num_iterations=num_iterations, state=state, **callback_dict)\n print(\"Iteration: \", i)\n if 'loss' in fetch_res:\n print(fetch_res['loss'])\n if 'losses' in fetch_res:\n print(fetch_res['losses'])", "def run(self, epochs: int) -> Generator[EpochLog, None, None]:\n trainer = self.trainer\n logger = self.logger\n\n self._log_confs(logger)\n\n # warm-up E-steps\n if self._conf.warmup_Esteps > 0:\n pprint(\"Warm-up E-steps\")\n for e in range(self._conf.warmup_Esteps):\n compute_reconstruction = (\n self._conf.warmup_reco_epochs is not None and e in self._conf.warmup_reco_epochs\n )\n d = trainer.e_step(compute_reconstruction)\n self._log_epoch(logger, d)\n\n # log initial free energies (after warm-up E-steps if any)\n if self._conf.warmup_Esteps == 0:\n d = trainer.eval_free_energies()\n self._log_epoch(logger, d)\n yield EpochLog(epoch=0, results=d)\n\n # EM steps\n for e in range(epochs):\n start_t = time.time()\n compute_reconstruction = (\n self._conf.reco_epochs is not None and e in self._conf.reco_epochs\n )\n d = trainer.em_step(compute_reconstruction)\n epoch_runtime = time.time() - start_t\n self._log_epoch(logger, d)\n yield EpochLog(e + 1, d, epoch_runtime)\n\n # remove leftover \".old\" logfiles produced by the logger\n rank = dist.get_rank() if dist.is_initialized() else 0\n leftover_logfile = self._conf.output + \".old\"\n if rank == 0 and Path(leftover_logfile).is_file():\n os.remove(leftover_logfile)\n\n # put trainer into undefined state after the experiment is finished\n self.trainer = None # type: ignore", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else 
conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def _training_loop(model, datasets, optimizer, loss_function, initial_epoch, epochs, callbacks,\n steps_per_epoch, train_on_batch, evaluate_model, metrics=[], weight_decay=0,\n evaluation_freq=1):\n tf.keras.backend.set_learning_phase(1)\n\n train_generators = [_to_infinite_iterator(d[0]) for d in datasets]\n valid_generators = [d[1] for d in datasets]\n\n for c in callbacks:\n c.on_train_begin(model)\n\n cumulative_batch_id = 0\n\n for epoch in range(initial_epoch, epochs):\n logger.info(f\"Start epoch {epoch}\")\n\n epoch_logs = {}\n\n for c in callbacks:\n c.on_epoch_begin(epoch, epoch_logs)\n for batch_id in range(steps_per_epoch):\n cumulative_batch_id += 1\n batch_logs = {}\n\n x_trains, y_trains, y_trains_a, y_trains_b, lams = [], [], [], [], []\n\n for dataset_id in range(len(datasets)):\n x_train, y_train = next(train_generators[dataset_id])\n\n x_trains.append(x_train)\n y_trains.append(y_train)\n\n if isinstance(x_train, dict):\n batch_logs.update({\"size:\" + str(dataset_id): len(list(x_train.values())[0])})\n else:\n batch_logs.update({\"size:\" + str(dataset_id): len(x_train)})\n\n for c in callbacks:\n c.on_batch_begin(batch=batch_id, logs=batch_logs)\n\n batch_logs_step = train_on_batch(model, optimizer, x_trains, y_trains,\n metrics,\n loss_function,\n weight_decay=weight_decay)\n\n batch_logs.update(batch_logs_step)\n for k in batch_logs:\n if hasattr(batch_logs[k], \"numpy\"):\n batch_logs[k] = batch_logs[k].numpy()\n\n # if isinstance(batch_logs[k], tf.Tensor):\n if (hasattr(batch_logs[k], 'ndim') and batch_logs[k].ndim > 0) or isinstance(batch_logs[k], list):\n batch_logs[k] = batch_logs[k] # .numpy()\n if isinstance(batch_logs[k], list):\n batch_logs[k] = np.array(batch_logs[k])\n else:\n batch_logs[k] = float(batch_logs[k])\n\n for c in callbacks:\n c.on_batch_end(batch=batch_id, logs=batch_logs)\n\n if evaluation_freq > 0 and (epoch % evaluation_freq == 0 or epoch == epochs - 1):\n tf.keras.backend.set_learning_phase(0)\n val_results = evaluate_model(model, valid_generators, loss_function, metrics)\n tf.keras.backend.set_learning_phase(1)\n for k, v in val_results.items():\n epoch_logs[f'val_{k}'] = v\n else:\n if evaluation_freq > 0:\n for k in previous_epoch_logs:\n if k not in epoch_logs:\n epoch_logs[k] = np.nan\n\n for c in callbacks:\n c.on_epoch_end(epoch, epoch_logs)\n\n logger.info('End of epoch {}, loss={}'.format(epoch, epoch_logs['loss:0']))\n\n previous_epoch_logs = dict(epoch_logs)\n\n for c in callbacks:\n c.on_train_end(model)", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n 
test_custom_distributed_multimodel_training()", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def _train(self):\n training_environment = self._training_environment\n evaluation_environment = self._evaluation_environment\n policy = self._policy\n pool = self._pool\n\n if not self._training_started:\n self._init_training()\n\n self._initial_exploration_hook(\n training_environment, self._initial_exploration_policy, pool)\n\n self.sampler.initialize(training_environment, policy, pool)\n\n gt.reset_root()\n gt.rename_root('RLAlgorithm')\n gt.set_def_unique(False)\n\n self._training_before_hook()\n\n for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):\n self._epoch_before_hook()\n gt.stamp('epoch_before_hook')\n\n start_samples = self.sampler._total_samples\n for i in count():\n samples_now = self.sampler._total_samples\n self._timestep = samples_now - start_samples\n\n if (samples_now >= start_samples + self._epoch_length\n and self.ready_to_train):\n break\n\n self._timestep_before_hook()\n gt.stamp('timestep_before_hook')\n\n self._do_sampling(timestep=self._total_timestep)\n gt.stamp('sample')\n\n if self.ready_to_train:\n self._do_training_repeats(timestep=self._total_timestep)\n gt.stamp('train')\n\n self._timestep_after_hook()\n gt.stamp('timestep_after_hook')\n\n training_paths = self.sampler.get_last_n_paths(math.ceil(self._epoch_length / self.sampler._max_path_length))\n gt.stamp('training_paths')\n evaluation_paths = self._evaluation_paths(policy, evaluation_environment)\n gt.stamp('evaluation_paths')\n\n training_metrics = self._evaluate_rollouts(training_paths, training_environment)\n gt.stamp('training_metrics')\n if evaluation_paths:\n evaluation_metrics = self._evaluate_rollouts(\n evaluation_paths, evaluation_environment)\n gt.stamp('evaluation_metrics')\n else:\n evaluation_metrics = {}\n\n self._epoch_after_hook(training_paths)\n gt.stamp('epoch_after_hook')\n\n sampler_diagnostics = self.sampler.get_diagnostics()\n\n diagnostics = self.get_diagnostics(\n iteration=self._total_timestep,\n batch=self._evaluation_batch(),\n training_paths=training_paths,\n evaluation_paths=evaluation_paths)\n\n time_diagnostics = gt.get_times().stamps.itrs\n\n diagnostics.update(OrderedDict((\n *(\n (f'evaluation/{key}', evaluation_metrics[key])\n for key in sorted(evaluation_metrics.keys())\n ),\n *(\n (f'training/{key}', training_metrics[key])\n for key in sorted(training_metrics.keys())\n ),\n *(\n (f'times/{key}', time_diagnostics[key][-1])\n for key in sorted(time_diagnostics.keys())\n ),\n *(\n (f'sampler/{key}', sampler_diagnostics[key])\n for key in sorted(sampler_diagnostics.keys())\n ),\n ('epoch', self._epoch),\n ('timestep', self._timestep),\n ('timesteps_total', self._total_timestep),\n ('train-steps', self._num_train_steps),\n )))\n\n if self._eval_render_kwargs and hasattr(\n evaluation_environment, 'render_rollouts'):\n # TODO(hartikainen): Make this consistent such that there's no\n # need for the hasattr check.\n training_environment.render_rollouts(evaluation_paths)\n\n yield diagnostics\n\n self.sampler.terminate()\n\n self._training_after_hook()\n\n yield {'done': True, **diagnostics}", "def train_online(self, epochs, iterations_per_epoch, batch_size, n_obs, **kwargs):\n \n losses = dict()\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n for it in range(1, iterations_per_epoch+1):\n\n # Determine n_obs and 
generate data on-the-fly\n if type(n_obs) is int:\n n_obs_it = n_obs\n else:\n n_obs_it = n_obs()\n params, sim_data = self._forward_inference(batch_size, n_obs_it, **kwargs)\n\n # One step backprop\n loss = self._train_step(params, sim_data)\n\n # Store loss into dictionary\n losses[ep].append(loss)\n \n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def run_epochs(self,\n fn: Callable[..., Optional[Dict[str, Any]]],\n data_generator: Iterable[ArrayTupleOrList],\n limit: Optional[int] = None,\n count: Optional[int] = None,\n metrics: Union[Sequence[str], type(ALL)] = NOT_SET,\n excludes: Sequence[str] = ()\n ) -> None:\n g = self.iter_epochs(limit=limit, count=count)\n try:\n for _ in g:\n self.run_batches(\n fn, data_generator, metrics=metrics, excludes=excludes)\n finally:\n g.close()", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def fit(self, epochs: int):\n try:\n for callback in self.callbacks:\n self.epoch = callback.on_fit_start(self.model, self.train_data, self.val_data, self.loss, self.optimizer, self.epoch, epochs)\n\n for self.epoch in range(self.epoch, epochs):\n self.model.train()\n self.run_epoch(self.train_data, PHASE_TRAIN, self.epoch)\n self.model.eval()\n self.run_epoch(self.val_data, PHASE_VAL, self.epoch)\n except KeyboardInterrupt as e:\n for callback in self.callbacks:\n callback.on_fit_interruted(e)\n except Exception as e:\n for callback in self.callbacks:\n callback.on_fit_failed(e)\n raise e\n\n for callback in self.callbacks:\n callback.on_fit_end()", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def run_epoch(models, train_ldr, it, avg_loss, clipping):\r\n # TODO: make model timing wholly optional\r\n # model_t = 0.0\r\n # data_t = 0.0\r\n # end_t = time.time()\r\n tq = tqdm.tqdm(train_ldr)\r\n exp_w = 0.99\r\n overall_losses = defaultdict(list)\r\n grad_norms = defaultdict(float)\r\n for batch in tq:\r\n inputs, labels = list(batch)\r\n # start_t = time.time()\r\n for (feature_class, model, optimizer) in 
models:\r\n cur_avg_loss = avg_loss[feature_class]\r\n optimizer.zero_grad()\r\n losses = model.loss(inputs, labels[feature_class])\r\n combined_losses = torch.zeros_like(losses[feature_class])\r\n for feature_class, loss in losses.items():\r\n combined_losses += loss\r\n combined_losses.backward(torch.ones_like(combined_losses))\r\n grad_norm = nn.utils.clip_grad_norm_(model.parameters(), clipping)\r\n if(isnan(grad_norm)):\r\n raise ValueError(\"Norm of grad is not a number\")\r\n\r\n combined_losses = combined_losses.data[0]\r\n optimizer.step()\r\n # prev_end_t = end_t\r\n # end_t = time.time()\r\n # model_t += end_t - start_t\r\n # data_t += start_t - prev_end_t\r\n cur_avg_loss = exp_w * cur_avg_loss + (1 - exp_w) * combined_losses\r\n avg_loss[feature_class] = cur_avg_loss.item()\r\n grad_norms[feature_class] = grad_norm.item()\r\n overall_losses[feature_class].append(combined_losses.item())\r\n tq.set_postfix(avg_losses={feature: round(value,5) for feature, value in avg_loss.items()},\r\n grad_norms={feature: round(value,5) for feature, value in grad_norms.items()})\r\n # model_time=model_t, data_time=data_t)\r\n it += 1\r\n return it, avg_loss, overall_losses", "def run_tests(test_name, dataset, models, n_test_images = 120, dimensions = (50, 50)):\n\n # Load test data\n train_data, train_labels, test_data, test_labels = image_utils.read_dataset(\n 0, \n n_test_images, \n './datasets/' + dataset, \n dimensions[0], \n dimensions[1]\n )\n\n # Run test for each model\n for model in models:\n print(\"Working with model '\" + model.label + \"'\")\n\n # Run predictions on test set\n start = time.time()\n predicted = model.run(copy.deepcopy(test_data))\n end = time.time()\n test_time = round(end - start, 3)\n\n # Calculate metrics\n metrics = Metrics(test_labels, predicted, 0.0, test_time)\n\n # Print results\n print(\"Results\\n\" + \"------\")\n print(str(metrics))\n\n # Save results\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/\"\n os.makedirs(os.path.dirname(filepath), exist_ok = True)\n\n print(\"Saving results to '\" + filepath + \"results.txt'\\n\")\n with open(filepath + \"results.txt\", 'w') as file:\n file.write(str(metrics))", "def run_epoch(self, sess, epoch_num, validate=True):\n total_loss = 0\n accuracies = []\n for i in range(self.batches_per_epoch):\n batch = self.loader.get_batch()\n if self.config.print_every and i % self.config.print_every == 0:\n if validate:\n val_accuracy = self.eval_validation_accuracy()\n print(\"step {}, validation accuracy {:.3f}\".format(i, val_accuracy))\n accuracies.append((i + epoch_num * self.batches_per_epoch, val_accuracy))\n else:\n if self.include_coverage and self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2], batch[3])\n elif self.include_coverage:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n elif self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n else:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1])\n print(\"step {}, training accuracy {:.3f}\".format(i, train_accuracy))\n \n if self.include_coverage and self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.e: batch[2], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_coverage:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.y_: batch[2], \n 
self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.e: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n else:\n attention, _, loss_val = sess.run([self.attention, self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.y_: batch[1],\n self.keep_prob: 1-self.config.dropout_prob})\n\t\tpdb.set_trace()\n\t\tnp.savetxt(\"a.csv\", attention[0], delimiter=\",\")\n total_loss += loss_val\n\n return total_loss / self.batches_per_epoch, accuracies", "def train(get_generator, loss_fn, optimizer, num_epochs=1, monitor=None,\n on_epoch_start=None, on_finish=None, on_error=None):\n logger = logging.getLogger(__name__)\n\n t_begin_train = timer()\n for epoch in range(num_epochs):\n avg_loss = 0\n if on_epoch_start:\n on_epoch_start(epoch)\n\n data_loader = get_generator(epoch)\n num_steps = len(data_loader)\n\n t_start = timer()\n for batch_idx, data in enumerate(data_loader):\n if batch_idx % 500 == 0:\n avg_loss /= num_steps\n if monitor:\n monitor({'epoch': epoch,\n 'train_loss': float(avg_loss),\n 'batch': batch_idx})\n avg_loss = 0.0\n\n optimizer.zero_grad() # this is not done automatically in torch\n # pass all data args to loss_function\n loss = loss_fn(data)\n\n if torch.isnan(loss):\n logging.error(f\"Loss is nan on batch {batch_idx} and epoch \"\n f\"{epoch}. Raising floating point error\")\n if on_error:\n on_error(data)\n raise FloatingPointError\n\n loss.backward()\n optimizer.step()\n\n avg_loss += loss.data.cpu().numpy()\n\n if batch_idx % 200 == 99:\n t_end = timer()\n logger.debug(f\"{batch_idx}/{len(data_loader)} batches done. \"\n f\"Rate: {200/(t_end-t_start):.2f} batch/sec\")\n t_start = timer()\n\n logging.info(\"Done Training. Time elapsed {}\".format(timer()-t_begin_train))\n if on_finish:\n on_finish()" ]
[ "0.6826375", "0.6805796", "0.67925835", "0.65626717", "0.64048594", "0.6282162", "0.62598425", "0.6251268", "0.62364304", "0.6235307", "0.6235307", "0.6232785", "0.62240577", "0.6211177", "0.620107", "0.6187016", "0.61487633", "0.61325216", "0.61206204", "0.6118335", "0.60593104", "0.60520625", "0.6047031", "0.6033384", "0.60313135", "0.59992784", "0.5987384", "0.59868634", "0.59778005", "0.597574" ]
0.8474212
0
Compute and return summary statistics from data.
def summarize(self, data): return self.summary(data).flatten()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_summarystat(self, data):\n if isinstance(data, pd.DataFrame):\n ma_values = self.kernel_transformer.transform(\n data, masker=self.masker, return_type=\"array\"\n )\n elif isinstance(data, list):\n ma_values = self.masker.transform(data)\n elif isinstance(data, np.ndarray):\n ma_values = data.copy()\n else:\n raise ValueError(f\"Unsupported data type '{type(data)}'\")\n\n stat_values = 1.0 - np.prod(1.0 - ma_values, axis=0)\n return stat_values", "def collect_data_stats(data):\n # We re-import this module here because this code will run\n # remotely.\n \n stats_0 = data.shape[0]\n stats_1 = data.sum(axis=0)\n stats_2 = (data**2).sum(axis=0)\n retval = (\n stats_0,\n stats_1,\n stats_2\n )\n return retval", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def compute_query_summary_statistics(data):\n return {\n nameserver: _compute_summary_stats(entries)\n for nameserver, entries in data.items()\n }", "def get_summary_stats(self):\r\n n = len(self.results)\r\n\r\n if n == 0:\r\n mean = None\r\n stdev = None\r\n\r\n elif n == 1:\r\n mean = numpy.mean(self.results)\r\n stdev = None\r\n\r\n else:\r\n mean = numpy.mean(self.results)\r\n stdev = numpy.std(self.results)\r\n\r\n sum_stats = {'n': n, 'mean': mean, 'stdev': stdev}\r\n\r\n return sum_stats", "def summarize_rec_data(data):\n\n # Warning: not all collectible data has a summary stats implemented below!\n # See get_rec_stats() above!\n\n stats = {}\n\n if 'hc_ro' in data:\n # Entropy across HC units average over samples.\n hc_ro_arr = np.array(list(data['hc_ro'].values()))\n stats['H HC ro'] = utils.entropy(hc_ro_arr.T).mean()\n\n if 'vs_state' in data:\n # Sum of vS reward estimates change (from first to last sample).\n vs_state = data['vs_state']\n stats['d vS'] = sum(vs_state[max(vs_state.keys())] - vs_state[0])\n\n if 'co_occs' in data:\n # Mean entropy of real location and HC state co-occurance frequencies.\n co_occs = data['co_occs'][max(data['co_occs'].keys())]\n stats['H HC co'] = np.nanmean(get_hc_co_occ_entropy(co_occs))\n stats['H loc co'] = np.nanmean(get_loc_co_occ_entropy(co_occs))\n\n return stats", "def compute_statistics(self):", "def compute_propagation_summary_statistics(data):\n return {\n type: _compute_summary_stats(entries)\n for type, entries in data.items()\n }", "def generate_statistics(data):\n print(data)\n for key in data:\n print('****\\nSummary data for %s:\\n----' % key)\n for category in data[key]:\n mean = generate_mean(data[key][category])\n print('\\taverage %s: %d' % (category, mean))\n print('\\n\\n')\n return", "def basic_stats(data):\n if isinstance(data, pd.DataFrame):\n return data.describe(percentiles=[0.5]).T.drop(['50%'], axis=1)\n else:\n return data.to_frame().describe(percentiles=[0.5]).T.drop(['50%'], axis=1)", "def _get_suff_statistics(self, data):\n raise NotImplementedError()\n # The only sufficient statistic is the KxKxB array of event counts assigned\n # to each of the basis functions\n if data is not None:\n ss = data.sum(axis=0)\n else:\n ss = np.zeros((self.K, self.K, self.B))\n\n return ss", "def data_statistics(title, data, stats=None, normalize=False, log_fun=logging.INFO):\n log_fun(\"Statistics %s\" % title)\n\n all_data = []\n for key in data:\n if not isinstance(data[key], list):\n all_data.append(data[key])\n else:\n all_data.extend(data[key])\n\n if normalize:\n minimum = min(all_data)\n maximum = max(all_data)\n all_data = [(d - minimum) / (maximum - minimum) for d in all_data]\n\n result = 
Statistics.calculate(all_data, stats, log_fun)\n FileWriter.write_dict_to_file(FileWriter.STATISTICS, title + \".txt\", result)", "def rct_stats(udf_data: UdfData):\n # The dictionary that stores the statistical data\n stats = {}\n # Iterate over each raster collection cube and compute statistical values\n for cube in udf_data.get_datacube_list():\n # make sure to cast the values to floats, otherwise they are not serializable\n stats[cube.id] = dict(\n sum=float(cube.array.sum()),\n mean=float(cube.array.mean()),\n min=float(cube.array.min()),\n max=float(cube.array.max())\n )\n # Create the structured data object\n sd = StructuredData(\n data=stats,\n type=\"dict\",\n description=\"Statistical data sum, min, max and mean for each raster collection cube as dict\",\n )\n # Remove all collections and set the StructuredData list\n udf_data.set_datacube_list(None)\n udf_data.set_structured_data_list([sd, ])", "def _get_suff_statistics(self, data):\n # The only sufficient statistic is the KxKxB array of event counts assigned\n # to each of the basis functions\n if data is not None:\n ss = data.sum(axis=0)\n else:\n ss = np.zeros((self.K, self.K, self.B))\n\n return ss", "def compute_summary(self, weather_data, ssh):\n\n for i, field in enumerate(self.weather_fields):\n weather_field = weather_data[...,i]\n\n self.summary[field] = (weather_field.mean(), weather_field.std())\n\n self.summary['ssh'] = (ssh.mean(), ssh.std())", "def get_statistics(data):\n v_min = None\n v_max = None\n v_avg = None\n v = None\n v_sum = .0\n count = 0\n for d in data:\n if d is None:\n continue\n try:\n v = float(d)\n except ValueError:\n print(pc.CRED, d, pc.CEND, end=',')\n continue\n if count == 0:\n v_min = v\n v_max = v\n else:\n if v < v_min:\n v_min = v\n if v > v_max:\n v_max = v\n v_sum += v\n count += 1\n if count > 0:\n v_avg = round(v_sum/count, 2)\n return v_min, v_max, v_avg", "def get_stats():\n datasets = [\n (\"../data_processing/data/page2answer_single_abstractive_summ.json\", \"p2a-single-abs\"),\n (\"../data_processing/data/page2answer_single_extractive_summ.json\", \"p2a-single-ext\"),\n (\"../data_processing/data/section2answer_multi_abstractive_summ.json\", \"s2a-multi-abs\"),\n (\"../data_processing/data/page2answer_multi_extractive_summ.json\", \"p2a-multi-ext\"),\n (\"../data_processing/data/section2answer_single_abstractive_summ.json\", \"s2a-single-abs\"),\n (\"../data_processing/data/section2answer_single_extractive_summ.json\", \"s2a-single-ext\"),\n (\"../data_processing/data/section2answer_multi_extractive_summ.json\", \"s2a-multi-ext\"),\n (\"../data_processing/data/question_driven_answer_summarization_primary_dataset.json\", \"complete_dataset\"),\n ]\n\n stats = SummarizationDataStats()\n for dataset in datasets:\n print(dataset[1])\n stats.load_data(dataset[0], dataset[1])\n stats.iterate_data()", "def dataset_statistics(dataset):\n print(dataset.describe())", "def dataset_statistics(dataset):\n print(dataset.describe())", "def dataset_statistics(dataset):\n print (dataset.describe())", "def summarize(dataset):\n summaries = [(np.mean(attribute), np.std(attribute)) for attribute in zip(*dataset)]\n\n return summaries", "def ComputeStats(data):\r\n avg = Mean(data)\r\n stdev = math.sqrt(Mean([(d-avg)**2 for d in data]))\r\n avgSq = Mean([d*d for d in data])\r\n stdevSq = math.sqrt(Mean([(d*d - avgSq)**2 for d in data]))\r\n corrs = [1]\r\n for i in range(1,5):\r\n cov = sum([(a*a-avgSq)*(b*b-avgSq)\r\n for (a, b) in zip(data[0:-i],data[i:])]\r\n ) / float(len(data) - i)\r\n 
corrs.append(cov/stdevSq/stdevSq)\r\n return avg, stdev, corrs", "def getStats(data, printStats=False):\n data = list(data)\n d = DataFrame()\n d['Avg'] = [np.mean(data)]\n d['len'] = [len(data)]\n d['min'] = [min(data)]\n d['max'] = [max(data)]\n d['std'] = [np.std(data)]\n d = d.transpose()\n d.columns = ['Stats']\n d = d.transpose()\n if printStats:\n print(d)\n return d", "def get_stats(self, datalist, state):\n data = {}\n for i, col in zip(range(5), datalist[0].items()):\n [diff, per_diff, tot_percentage] = self.get_diff_and_percentage(datalist[0].iloc[i], datalist[1].iloc[i],\n state)\n data[col[0]] = {\"value\": datalist[0].iloc[i], \"change\": diff, \"change_per\": per_diff,\n \"tot_percentage\": tot_percentage}\n return data", "def summary(data, key=itemgetter(0), value=itemgetter(1)):\n\n for k, group in groupby(data, key):\n yield (k, sum(value(row) for row in group))", "def summarizeData(data, dataLabel=None, decimals=4):\n if dataLabel is not None:\n print ('%s: Data Set Summary (median, IQR)' % dataLabel)\n n = max([len(l) for l in data.keys()])\n for i, k in enumerate(data.keys()):\n g1 = data[k]\n iqr1 = np.subtract(*np.percentile(g1, [75, 25]))\n print(u' {:s}: {:8.{pc}f}, {:.{pc}f} (median, IQR)'.\n format(k.rjust(n), np.median(g1), iqr1, pc=decimals))", "def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res", "def summary(self) -> Dict[str, Dict[str, float]]:\n vals: Dict[str, List[float]] = defaultdict(list)\n if not self.steps: # pragma: no cover\n return {}\n\n for timing_dict in self._timings:\n for step in self.steps:\n if step in timing_dict:\n vals[step].append(timing_dict[step])\n summary = {}\n for step in self.steps:\n if vals[step]:\n summary[step] = {\n \"cnt\": len(vals[step]),\n \"sum\": sum(vals[step]),\n \"min\": min(vals[step]),\n \"max\": max(vals[step]),\n \"avg\": sum(vals[step]) / len(vals[step]),\n }\n return summary", "def compute(self, data):\n data_arr = np.array(data)\n assert len(data_arr.shape) == 3, \"required input shape is (n_points, n_species, n_timepoints)\"\n\n res = self.model_eval.predict(data_arr)\n\n if self.mean_trajectories:\n res = np.asarray(np.mean(res, axis=0)) # returns a scalar, so we cast it as an array\n\n if self.use_logger:\n self.logger.info(\"ANN_Statistics summary statistic: processed data matrix of shape {0} and generated summaries\"\n \" of shape {1}\".format(data.shape, res.shape))\n return res", "def get_statistics(self):\n return self.results" ]
[ "0.7708388", "0.75806755", "0.7545205", "0.7426084", "0.7330722", "0.71296847", "0.7019531", "0.69181734", "0.6914385", "0.6904229", "0.6891686", "0.68396693", "0.68380475", "0.6826168", "0.6781659", "0.67217654", "0.66204166", "0.6595634", "0.6595634", "0.6572965", "0.6572822", "0.6501245", "0.64520335", "0.6438952", "0.64028955", "0.639818", "0.6365827", "0.63406956", "0.6338296", "0.63329715" ]
0.76487106
1
Return a DirectiveError suitable for being thrown as an exception. Call "raise self.directive_error(level, message)" from within a directive implementation to return one single system message at level `level`, which automatically gets the directive block and the line number added. Preferably use the `debug`, `info`, `warning`, `error`, or `severe` wrapper methods, e.g. ``self.error(message)`` to generate an ERROR-level directive error.
def directive_error(self, level, message): return DirectiveError(level, message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, msg):\n if self.current_line and self.current_file:\n msg = '{}\\nError in {} line {}'.format(\n msg, self.current_file, self.current_line)\n return self.DirectiveError(msg)", "def error(self, message):\n return self.log(\"ERROR\", message)", "def debug_error(self, message):\n raise NotImplementedError", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def error(self):\n return self._decorator_wrapper(EventName.error)", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)", "def exception(self) -> exceptions.ErrorMessageException:\n\n return ErrorMessage.ERROR_CODES_TO_EXCEPTIONS.get(\n self.error_code,\n exceptions.GenericException\n )", "def error(self, message, location):\n raise CompilerError(message, loc=location)", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def error(self, msg, elem):\n if elem is not None:\n msg += \" (line %d)\" % elem.sourceline\n if self.ignore_errors:\n return self.warn(msg, elem)\n raise ParserException(msg)", "def detach_error(self) -> \"VolumeError\":\n return typing.cast(\n \"VolumeError\",\n self._properties.get(\"detachError\"),\n )", "def print_error(self, msg, line_num=False, errorFunc=SystemError):\n if line_num is False: line_num = self.line_num\n bad_line_ind = self.line_nums[line_num]\n\n err_msg = \"\\n\\n\\n############ ERROR #############\\n\"\n err_msg += \"Error in input_file '%s'\\n\\n---\\n\" % self.inp_filename\n err_msg += msg.strip(\"\\n\")\n err_msg += \"\\n---\\n\\nline number: %i\\n\" % self.line_nums[line_num]\n err_msg += f\"line: '{self.file_ltxt_orig[bad_line_ind]}'\"\n err_msg += \"\\n\"\n err_msg += f\"err id: {self.E_str}\"\n err_msg += \"\\n#################################\\n\\n\"\n raise errorFunc(err_msg)", "def get_error_message(self):\n return self.error_message.get_error_message()", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def error(self, message):\n raise ArgumentParseError(message)", "def get_message(self):\n if self.lineno:\n return \"Compilation error at line %d: %s\" % (self.lineno, self.details)\n else:\n return \"Compilation error: \" + self.details", "def error(cls, message):\n print('[ERROR] {0}'.format(message))", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def error(self, message, new_line=True):\n #\n # Note that while the call to \"get_caller()\" is costly, it only happens\n # when an error occurs, so it shouldn't impact performance\n #\n error_data = (message, self.get_caller())\n self._errors.append(error_data)", "def get_error(self):\n return self.e", "def _raise_device_exception(self, msg, category=DeviceException.OPERATION_FAILED):\n self._logger.error(msg)\n raise DeviceException(category, msg)", "def get_error_message(self):\n\n return self.err_message", "def 
logger(self):\n return DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def get_error(self):\n return self.exc_info", "def error(self, message):\n for_verbosity = 0\n if self.verbosity_level >= for_verbosity:\n self.logger.error(message, exc_info=True)", "def _error(self, token, msg):\n self._interpreter.parse_error(token, msg)\n return ParseError()", "def error(self, message: str):\n self.log(Level.ERROR, message)", "def getMessage(self):\n return _libsbml.XMLError_getMessage(self)", "def err(message):\n\n timestamp = format_time(get_time())\n message = '{} - [ERROR] - {}'.format(timestamp, message)\n _log_status(message)" ]
[ "0.6895362", "0.53421384", "0.5183119", "0.5157437", "0.49337742", "0.49263397", "0.48709092", "0.48167634", "0.47850382", "0.4774763", "0.4758415", "0.47475636", "0.47076467", "0.46929008", "0.46928918", "0.4671942", "0.46644497", "0.4658521", "0.46451083", "0.46432275", "0.4633507", "0.46231043", "0.46218944", "0.4621808", "0.4612353", "0.4561164", "0.45561552", "0.4529378", "0.45265886", "0.4495018" ]
0.85326535
0
Append self.options['name'] to node['names'] if it exists. Also normalize the name string and register it as explicit target.
def add_name(self, node):
    if 'name' in self.options:
        name = nodes.fully_normalize_name(self.options.pop('name'))
        if 'name' in node:
            del(node['name'])
        node['names'].append(name)
        self.state.document.note_explicit_target(node, node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, name):\n\n # no need to add first_name while adding full_name\n name_list = name.strip().split()[1:]\n name_list.append(name)\n for item in set(name_list):\n node = self.root\n # check for every char in word, i.e. check whether is it in trie\n # if yes, then move forward over that path\n # else, add node with given char\n for char in item.lower():\n if char not in node:\n node[char] = {}\n node = node[char]\n\n if \"NAME\" in node:\n node[\"NAME\"].append(name)\n else:\n node[\"NAME\"] = [name]", "def name(self, name: str):\n self.inst['targetname'] = name", "def normalize_names(self):\n for node in self.asset.findall(\".//*[@name]\"):\n name = node.get(\"name\")\n if not name.startswith(self.name + \".\"):\n node.set(\"name\", self.name + \".\" + name)\n\n for attr in ['texture', 'material', 'mesh']:\n for node in self.root.findall(\".//*[@{}]\".format(attr)):\n name = node.get(attr)\n if not name.startswith(self.name + \".\"):\n node.set(attr, self.name + \".\" + name)\n\n for node in self.worldbody.findall(\".//*[@name]\"):\n name = node.get(\"name\")\n if not name.startswith(self.name + \".\"):\n node.set(\"name\", self.name + \".\" + name)\n\n for node in self.worldbody.findall(\".//*[@joint]\"):\n joint = node.get(\"joint\")\n if not joint.startswith(self.name + \".\"):\n node.set(\"joint\", self.name + \".\" + name)", "def _add_name(self, msg, name):\n try:\n names = self.get_local(msg, \"names\")\n except KeyError:\n names = set()\n names.add(name)\n self.set_local(msg, \"names\", names)", "def addAlias(self, name):\r\n self._otherNames.append(name.strip().lower())", "def set_name(self, name):\r\n self.stream.set_node_name(self.node, name)", "def add_node(self, name, node):\n self.nodes.setdefault(name, node)", "def name(self, name: List[NameAndValue]):\n\n self._name = name", "def add_node(self, name):\n if not name in self._main_dictionary:\n self._main_dictionary[name] = set()", "def node_name(self, value: str):\n self._properties[\"nodeName\"] = value", "def add_name(self, name):\n self.name = name", "def add(self, node, name=None):\r\n\r\n name = name or self._generate_node_name()\r\n\r\n if name in self.nodes:\r\n raise KeyError(\"Node with name %s already exists\" % name)\r\n\r\n self.nodes[name] = node\r\n\r\n return name", "def add_person(self, name):\n\n if name not in self.nodes:\n # Be careful not to just add them a second time -- otherwise,\n # if we accidentally added someone twice, we'd clear our their list\n # of friends!\n self.nodes[name] = PersonNode(name)", "def setCollectorsNames(self, collectors_names):\n networkx.set_node_attributes(self, \n values=collectors_names,\n name='fullname')", "def put_node_variable_name(self, name, index):\n # 1 - based indexing!\n assert index <= self._f.dimensions[\"num_nod_var\"]\n\n self._f.variables[\"name_nod_var\"][index - 1] = b\"\"\n self._f.variables[\"name_nod_var\"][index - 1, :len(name)] = \\\n [_i.encode() if hasattr(_i, \"encode\") else _i for _i in name]", "def _name_changed(self):\n self._named = True", "def _on_node_name_changed(self, oldname, newname):\n if newname in self._nodes and self._nodes[oldname] != self._nodes[newname]:\n raise Exception(\"New name collides with existing node.\")\n node = self._nodes[oldname]\n self._nodes[newname] = node\n del self.__nodes[oldname]\n self.node_name_changed.emit(oldname, newname)", "def addNames(self, names):\n for name in names:\n self.tags.setdefault(name, ModelTag(name))", "def add_name(self, name: str) -> None:\n self._names.append(name)", "def 
addNode( self, name, **opts ):\n self.g.add_node( name, **opts )\n return name", "def record_name(self, name: str) -> None:\n if self.is_top_level():\n self._toplevel_names.append(name)", "def setnames(self, *args, **kwargs):\n return _coordsys.coordsys_setnames(self, *args, **kwargs)", "def set_name(self, newname=\"\"):\n self.name = newname", "def m_setName(node_name=\"root\", name_to_resolve=\"root\", current_owner_alias=\"ROOT\"):\n\n # Get the account info from the alias\n account_address, account_key = wallet.account_from_name(\n current_owner_alias, \"ThePassword\")\n\n # Set the name\n resolver.setName(node_name, name_to_resolve, account_key)", "def add_names(self, *sNames):\n self.names += list(sNames)", "def visit_Name(self, node):\n if isinstance(node.ctx, gast.Param) and node.id != \"self\":\n node.id += '_new'\n\n return node", "def set_name(self, name):\n self.options['name'] = name", "def set_name(self, name):\n # XXX: convert name to unicode, if it's a plain string?\n d = analyze_name(name, canonical=0)\n self.data.update(d)", "def update_names(self, aliases, get_path):\n for alias in aliases:\n path = get_path(alias.name)\n if path not in self.interesting:\n continue\n if self.interesting[path]:\n for attr in self.interesting[path]:\n name = '.'.join((alias.asname or alias.name, attr))\n self.names[name] = '.'.join((path, attr))\n else:\n name = alias.asname or alias.name\n self.names[name] = path", "def name(self, name):\n from_name = self.name\n assert isinstance(name, str)\n self._name = name\n if self.has_parent():\n self._parent_._name_changed(self, from_name)" ]
[ "0.6573605", "0.607759", "0.60158175", "0.59481245", "0.59406054", "0.5879365", "0.5829988", "0.57405627", "0.57331866", "0.5675645", "0.5675142", "0.56737715", "0.5626971", "0.5610032", "0.5598339", "0.555156", "0.5522539", "0.5516251", "0.54982877", "0.54964846", "0.54818475", "0.5480495", "0.5475034", "0.5461553", "0.5453235", "0.5428969", "0.5427444", "0.5394974", "0.53914", "0.53681475" ]
0.76973844
0
Define & return a directive class generated from `directive_fn`. `directive_fn` uses the old-style, functional interface.
def convert_directive_function(directive_fn):
    class FunctionalDirective(Directive):
        option_spec = getattr(directive_fn, 'options', None)
        has_content = getattr(directive_fn, 'content', False)
        _argument_spec = getattr(directive_fn, 'arguments', (0, 0, False))
        required_arguments, optional_arguments, final_argument_whitespace \
            = _argument_spec

        def run(self):
            return directive_fn(
                self.name, self.arguments, self.options, self.content,
                self.lineno, self.content_offset, self.block_text,
                self.state, self.state_machine)

    # Return new-style directive.
    return FunctionalDirective
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directive(func):\n func.cfg_is_directive = True\n return func", "def from_directive(cls, directive, app):\n return cls(directive,\n app,\n arguments=directive.arguments,\n content=directive.content,\n options=directive.options)", "def with_cm4_doc(func):\n def new(instance, args, arguments):\n func(instance, args, arguments)\n\n new.__doc__ = cm4.command.command.__doc__\n return new", "def fmap(function, descriptor):\n return MappedDescriptor(descriptor, function)", "def validator_for(context_fn):\n\n def validator_for_decor(validator_fn):\n # Yes, this doesn't return a function! However, a Validator instance is\n # callable, so this is fine :)\n # See: https://stackoverflow.com/a/20791175 (and the other answers)\n return Validator(context_fn, validator_fn)\n return validator_for_decor", "def gen_rst_directive(fun_name, params):\n res = \".. cmake:function:: %s\" % fun_name\n res += \"(\"\n sig_params = [decorate(name, type) for(type, name, doc) in params]\n sig_params = [x for x in sig_params if x is not None]\n sig_params = \" \".join(sig_params)\n res += sig_params\n res += \")\"\n res += \"\\n\"\n res += \"\\n\"\n for param in params:\n (type, name, doc) = param\n if type == \"example\":\n # \\example is handled by gen_example_rst\n continue\n doc = doc.replace(\"\\n\", \" \")\n to_add = \":arg %s: %s\" % (name, doc)\n res += indent(to_add, 2)\n res += \"\\n\"\n return res", "def do(func, pure=False):\n def _dfunc(*args, **kwargs):\n return applyfunc(func, args, kwargs, pure=pure)\n\n with ignoring(AttributeError):\n _dfunc = wraps(func)(_dfunc)\n\n return _dfunc", "def create_decorated_class(klass, decorator, methods=None):\n class Decorated(klass): pass\n d_klass = Decorated\n decorate_class(d_klass, decorator, methods)\n return d_klass", "def command(func):\n classname = inspect.getouterframes(inspect.currentframe())[1][3]\n name = func.__name__\n help_name = name.replace(\"do_\", \"help_\")\n doc = textwrap.dedent(func.__doc__)\n\n def new(instance, args):\n # instance.new.__doc__ = doc\n try:\n argv = shlex.split(args)\n arguments = docopt(doc, help=True, argv=argv)\n func(instance, args, arguments)\n except SystemExit as e:\n if args not in ('-h', '--help'):\n print(\"Could not execute the command.\")\n print(e)\n print(doc)\n\n new.__doc__ = doc\n return new", "def make_decorator(func):\n def decorate(newfunc):\n if hasattr(func, 'compat_func_name'):\n name = func.compat_func_name\n else:\n name = func.__name__\n newfunc.__dict__ = func.__dict__\n newfunc.__doc__ = func.__doc__\n newfunc.__module__ = func.__module__\n if not hasattr(newfunc, 'compat_co_firstlineno'):\n newfunc.compat_co_firstlineno = func.func_code.co_firstlineno\n try:\n newfunc.__name__ = name\n except TypeError:\n # can't set func name in 2.3\n newfunc.compat_func_name = name\n return newfunc\n return decorate", "def function(fnc, *args, **kwargs):\n return Function(fnc, args=args, kwargs=kwargs).tunable()", "def decorator():\n return _decorator", "def get_decorated_function(self):", "def register_based_directives():\n if not BASED_LIB_RST:\n return\n\n if \"directives\" in BASED_LIB_RST:\n for dir_name, dir_cls_str in BASED_LIB_RST[\"directives\"].items():\n class_ = import_string(dir_cls_str)\n directives.register_directive(dir_name, class_)", "def decorator( this, native_decorator) :\n\t\tdef new_decorator( fn) :\n\t\t\tfn_decorator = native_decorator( fn)\n\t\t\tnative_fn = this._get_native_function( fn)\n\t\t\t\n\t\t\tthis._set_decorator( fn_decorator, 
new_decorator)\n\t\t\tthis._set_native_function( fn_decorator, native_fn)\n\t\t\tthis._set_native_function( new_decorator, native_fn)\n\t\t\tthis._append_decorator( native_fn, new_decorator)\n\t\t\t\n\t\t\treturn fn_decorator\n\t\t\n\t\tnew_decorator.__name__ = native_decorator.__name__\n\t\tnew_decorator.__doc__ = native_decorator.__doc__\n\t\t\n\t\treturn new_decorator", "def add_decl_function(node, func, is_override):\n name = func.__name__\n klass = node.klass\n if is_override:\n current = getattr(klass, name, None)\n if not getattr(current, \"_d_func\", False):\n raise TypeError(\"'%s' is not a declarative function\" % name)\n elif hasattr(klass, name):\n _override_fail(klass, name)\n d_func = DeclarativeFunction(func, node.scope_key)\n setattr(klass, name, d_func)", "def _get_directive_name(self):", "def auto_attr(func):\r\n return OneTimeProperty(func)", "def decorator(func):\n\n pass", "def command(func: 'function') -> 'function':\n func._decorators = (Bot.command,)\n return func", "def _directive_render(node: RenderTreeNode, context: RenderContext) -> str:\n # special directives that should only be used within substitutions\n if node.meta[\"module\"].endswith(\"misc.Replace\") and node.children:\n return \"\\n\\n\".join(child.render(context) for child in node.children[-1])\n if node.meta[\"module\"].endswith(\"misc.Date\"):\n return \"{sub-ref}`today`\"\n # TODO handle unicode directive\n\n name = node.meta[\"name\"]\n info_str = option_block = code_block = \"\"\n\n if node.children and node.children[0].type == \"directive_arg\":\n info_str = \"\".join(child.render(context) for child in node.children[0])\n info_str = \" \".join(info_str.splitlines()).strip()\n if info_str:\n info_str = \" \" + info_str\n\n if node.meta[\"options_list\"]:\n yaml_str = yaml_dump(\n {\n key: (True if val is None else (int(val) if val.isnumeric() else val))\n for key, val in node.meta[\"options_list\"]\n }\n )\n option_block = indent(yaml_str, \":\", lambda s: True).strip()\n\n if node.children and node.children[-1].type == \"directive_content\":\n content = \"\\n\\n\".join(child.render(context) for child in node.children[-1])\n if not option_block and content.startswith(\":\"):\n # add a new-line, so content is not treated as an option\n content = \"\\n\" + content\n elif option_block and content:\n # new lines between options and content\n option_block += \"\\n\\n\"\n code_block = content\n\n if option_block or code_block:\n # new line before closing fence\n code_block += \"\\n\"\n\n # Info strings of backtick code fences can not contain backticks or tildes.\n # If that is the case, we make a tilde code fence instead.\n if node.markup and \":\" in node.markup:\n fence_char = \":\"\n elif \"`\" in info_str or \"~\" in info_str:\n fence_char = \"~\"\n else:\n fence_char = \"`\"\n\n # The code block must not include as long or longer sequence of `fence_char`s\n # as the fence string itself\n fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)\n fence_str = fence_char * fence_len\n return f\"{fence_str}{{{name}}}{info_str}\\n{option_block}{code_block}{fence_str}\"", "def run_directive(\n self, name: str, first_line: str, content: str, position: int\n ) -> List[nodes.Element]:\n # TODO directive name white/black lists\n\n self.document.current_line = position\n\n # get directive class\n directive_class, messages = directives.directive(\n name, self.language_module_rst, self.document\n ) # type: (Directive, list)\n if not directive_class:\n error = self.reporter.error(\n 'Unknown 
directive type \"{}\".\\n'.format(name),\n # nodes.literal_block(content, content),\n line=position,\n )\n return [error] + messages\n\n if issubclass(directive_class, Include):\n # this is a Markdown only option,\n # to allow for altering relative image reference links\n directive_class.option_spec[\"relative-images\"] = directives.flag\n directive_class.option_spec[\"relative-docs\"] = directives.path\n\n try:\n arguments, options, body_lines = parse_directive_text(\n directive_class, first_line, content\n )\n except DirectiveParsingError as error:\n error = self.reporter.error(\n \"Directive '{}': {}\".format(name, error),\n nodes.literal_block(content, content),\n line=position,\n )\n return [error]\n\n # initialise directive\n if issubclass(directive_class, Include):\n directive_instance = MockIncludeDirective(\n self,\n name=name,\n klass=directive_class,\n arguments=arguments,\n options=options,\n body=body_lines,\n lineno=position,\n )\n else:\n state_machine = MockStateMachine(self, position)\n state = MockState(self, state_machine, position)\n directive_instance = directive_class(\n name=name,\n # the list of positional arguments\n arguments=arguments,\n # a dictionary mapping option names to values\n options=options,\n # the directive content line by line\n content=StringList(body_lines, self.document[\"source\"]),\n # the absolute line number of the first line of the directive\n lineno=position,\n # the line offset of the first line of the content\n content_offset=0, # TODO get content offset from `parse_directive_text`\n # a string containing the entire directive\n block_text=\"\\n\".join(body_lines),\n state=state,\n state_machine=state_machine,\n )\n\n # run directive\n try:\n result = directive_instance.run()\n except DirectiveError as error:\n msg_node = self.reporter.system_message(\n error.level, error.msg, line=position\n )\n msg_node += nodes.literal_block(content, content)\n result = [msg_node]\n except MockingError as exc:\n error_msg = self.reporter.error(\n \"Directive '{}' cannot be mocked: {}: {}\".format(\n name, exc.__class__.__name__, exc\n ),\n nodes.literal_block(content, content),\n line=position,\n )\n return [error_msg]\n\n assert isinstance(\n result, list\n ), 'Directive \"{}\" must return a list of nodes.'.format(name)\n for i in range(len(result)):\n assert isinstance(\n result[i], nodes.Node\n ), 'Directive \"{}\" returned non-Node object (index {}): {}'.format(\n name, i, result[i]\n )\n return result", "def func_to_mod(f):\r\n deprecation_warning()\r\n def make(**kwinit):\r\n m = Module()\r\n outputs = f(**kwinit)\r\n if isinstance(outputs, list):\r\n for i,o in enumerate(outputs):\r\n setattr(m, 'output%(i)i', o)\r\n else:\r\n m.output = outputs\r\n\r\n return m\r\n return make", "def __call__(self, function: FuncSpeechArg):\n self._add_attr(function)\n return function", "def __call__(self, function: FuncStrArg):\n self._add_attr(function)\n return function", "def __def_function__():\n pass", "def classproperty(func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n\n return ClassPropertyDescriptor(func)", "def classproperty(func):\n if not isinstance(func, (classmethod, staticmethod)):\n func = classmethod(func)\n\n return ClassPropertyDescriptor(func)", "def make_delegate_method(delegateattribute, servicename):\n\n def delegate_to(self, delegate):\n setattr(self, delegateattribute, delegate)\n delegate_to.__doc__ = \"Delegate \" + servicename + \" to another helper object.\"\n return delegate_to", "def 
run(self):\n node = DirectiveNode(\n self.name,\n name=self.name,\n content=self.content,\n options=self.options,\n )\n return [node]" ]
[ "0.6722285", "0.62953544", "0.49523148", "0.492212", "0.49055788", "0.48488826", "0.48386657", "0.48240536", "0.48149803", "0.4797709", "0.4784566", "0.47539756", "0.46898127", "0.46874297", "0.46547228", "0.4631999", "0.46281904", "0.46124476", "0.46081212", "0.45765257", "0.4544046", "0.4532801", "0.4497612", "0.44956338", "0.44917586", "0.44883358", "0.44734073", "0.44734073", "0.4453059", "0.44357806" ]
0.7654575
0
Filter recipes by user's available time to cook
def filter_by_time(df, user):
    time = user.time_to_cook.replace('cooking_time_less_than_', '')
    return df.loc[df.minutes <= int(time)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\n user = self.request.user\n return Recipe.objects.filter(created_by=user)", "def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items", "def available_processes(processes, time):\n return filter(lambda x: ((x['arrival_time'] <= time) and (x['remaining_time'] > 0)), processes)", "def timed_recipes():\n time = request.args.get('time', 0, type=int) #raw input from HTML page\n global time_global\n time_global = time #sets global time to inputted time, for use in search function\n return jsonify(cooktime=time_global) #returns a confirmation of the input tiime", "def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])", "def get_queryset(self):\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(recipe__isnull=False)\n\n return queryset.filter(user=self.request.user).order_by('-name')", "def filter(self, event, *args, **kwargs):\n return event[\"expiration_datetime\"] <= datetime.now()", "def filter():\n ingredient = request.args.get(\"ingredient\")\n if ingredient == None: # no ingredient parameter was included in the request\n return Response(\n \"{\\\"error\\\":\\\"ingredient parameter is required\\\"}\",\n status=400,\n mimetype=\"application/json\")\n\n recipes = [\n recipe.to_json_dict()\n for recipe in recipebook.recipes\n if recipe.has_ingredient(ingredient)]\n\n return Response(\n json.dumps(recipes),\n mimetype=\"application/json\")", "def get_queryset(self):\n assigned_only = bool(\n int(self.request.query_params.get('assigned_only', 0))\n )\n queryset = self.queryset\n if assigned_only:\n queryset = queryset.filter(recipe__isnull=False)\n\n return queryset.filter(user=self.request.user).order_by('-name').distinct()", "def read(cls, *criteria, **kwargs):\n if not kwargs.get('removed', False):\n return cls.query.filter(cls.time_removed == 0, *criteria)\n return cls.query.filter(*criteria)", "def latest_updated(self, chef, date):\n added_recipes = chef.recipes_added.values_list('id', flat=True)\n return self.filter(Q(recipe__chef=chef) |\n Q(recipe__in=added_recipes, recipe__draft=False, recipe__private=False)) \\\n .filter(edit_date__gt=date) \\\n .order_by('edit_date')", "def filter():\n\n course = request.args['course-filter']\n\n # Get relevant recipes\n get_recipes = mongo.db.recipes.find({'course': {'$regex': course}})\n\n count_recipes = mongo.db.recipes.count_documents({'course':\n {'$regex': course}})\n\n if course == 'All':\n flash('Here are our all of our recipes:', 'success')\n return redirect(url_for('index'))\n # If there are no recipes with the selected course\n elif count_recipes == 0:\n flash('There are currently no ' + course + ' recipes', 'danger')\n return redirect(url_for('index'))\n else:\n flash('Here are our ' + course + ' recipes:', 'success')\n return render_template('filter.html', title=course + ' Recipes',\n 
recipes=get_recipes)", "def search_new(self, chef=None):\n MIN_PHOTOS = 4\n DAYS = 7\n after = now() - timedelta(DAYS)\n\n query = self.filter(draft=False, private=False)\n query = query.filter(creation_date__gt=after)\n query = query.annotate(num_photos=Count('photos')).filter(num_photos__gte=MIN_PHOTOS)\n query = query.exclude(chef__in=chef.following.all())\n query = query.exclude(pk__in=chef.recipes_added.all())\n query = query.exclude(chef=chef) # Don't show own recipes\n query = query.order_by('-final_score')\n return query", "async def filter(self, **kwargs):\n\n pass", "def Filter(self, info):\n\n add_item = True\n \n if add_item and (self.use_only_ids != None) and (uid not in self.use_only_ids):\n add_item = False\n \n if add_item and (info['latlong'] == None):\n add_item = False\n \n if add_item and (self.size != None) and (info['size'] < self.size):\n add_item = False\n \n if add_item and (self.expert_rank != None) and (info['expert_rank'] < self.expert_rank):\n add_item = False\n \n if add_item and (self.kosher != None) and (self.kosher == True and info['kosher'] == False):\n add_item = False\n \n if add_item and (self.visiting_center != None) and (self.visiting_center == True and info['visiting_center'] == False):\n add_item = False\n \n if add_item and (self.visiting_center_free_admission != None) and (self.visiting_center_free_admission == True and info['visiting_center_free_admission'] == False):\n add_item = False\n \n if add_item and self.visit_time != None:\n day_of_visit = time.strftime(\"%A\", time.localtime(self.visit_time)).lower()\n if info['hours'][day_of_visit] != None:\n closing_at_that_day = time.mktime(time.strptime(time.strftime(\"%A, %d %b %Y\", time.localtime(self.visit_time)) + \" %d:00:00\" % (info['hours'][day_of_visit]), \"%A, %d %b %Y %H:%M:%S\"))\n if self.visit_time > (closing_at_that_day - self._delta_time_before_close):\n add_item = False\n if day_of_visit == 'saturday' and self.kosher == True:\n add_item = False\n \n if add_item and self.use_weather: \n if not self.weather_client.GoodForWinery(self.weather_client.GetCondition(info['latlong'])):\n add_item = False\n \n return add_item", "def find_favorite_recipes_for_user(self, user_doc, count):\n try:\n self.client.connect()\n db = self.client[self.db_name]\n latest_user_doc = db[user_doc['_id']]\n if 'recipes' in latest_user_doc.keys():\n user_recipes = latest_user_doc['recipes']\n user_recipes.sort(key=lambda x: x['count'], reverse=True)\n recipes = []\n for i, recipe in enumerate(user_recipes):\n if i >= count:\n break\n recipes.append(recipe)\n return recipes\n else:\n return []\n finally:\n self.client.disconnect()", "def filter_by_cuisine(df, user):\n\n if len(user.cuisines) == 0:\n # User has selected no cuisines - do not need to remove recipes\n return df\n\n return df.loc[df.cuisines.isin(user.cuisines)]", "def test_filter_ingredients_assigned_to_recipes(self):\n in1 = Ingredient.objects.create(user=self.user, name='Apples')\n in2 = Ingredient.objects.create(user=self.user, name='Turkey')\n recipe = Recipe.objects.create(\n title='Apple Crumble',\n time_minutes=5,\n price=Decimal('4.50'),\n user=self.user,\n )\n recipe.ingredients.add(in1)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n s1 = IngredientSerializer(in1)\n s2 = IngredientSerializer(in2)\n self.assertIn(s1.data, res.data)\n self.assertNotIn(s2.data, res.data)", "def get_queryset(self):\n\n circle = self.circle\n\n if self.action not in ['finish', 'qualify']:\n offset = timezone.now() + 
timedelta(minutes=10)\n\n queryset = circle.ride_set.filter(\n available_seats__gte=1,\n departure_date__gte=offset\n )\n else:\n queryset = circle.ride_set.all()\n\n return queryset", "def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items", "def test_filter_recipe_by_ingredient(self):\n recipe1 = sample_recipe(user=self.user, title=\"egg curry\")\n recipe2 = sample_recipe(user=self.user, title='dal curry')\n ingredient1 = sample_ingredient(user=self.user, name='egg')\n ingrident2 = sample_ingredient(user=self.user, name='dal')\n recipe1.ingredient.add(ingredient1)\n recipe2.ingredient.add(ingrident2)\n recipe3 = sample_recipe(user=self.user, title='potato curry')\n\n res =self.client.get(RECIPE_URL,\n {'ingredient': f'{ingredient1.id},{ingrident2.id}'})\n seralizer1 = Recipeserializer(recipe1)\n seralizer2 = Recipeserializer(recipe2)\n seralizer3 = Recipeserializer(recipe3)\n\n self.assertIn(seralizer1.data, res.data)\n self.assertIn(seralizer2.data, res.data)\n self.assertNotIn(seralizer3.data, res.data)", "def get_queryset(self):\n\t\treturn Fishery.objects.filter(updated_date__lte=timezone.now())", "def get_all_recipes(current_user):\n # default query\n query = Recipe.query.join(Recipe.used).group_by(Recipe.id)\n\n # get query string\n query_string = request.args\n\n # filters\n name_keyword = query_string.get('name')\n text_keyword = query_string.get('text')\n ingredient_keyword_list = query_string.getlist('ingredient')\n\n # order\n order_keyword = query_string.get('order')\n\n # go through args\n if name_keyword:\n query = query.filter(Recipe.name.ilike(f'%{name_keyword}%'))\n if text_keyword:\n query = query.filter(Recipe.text.ilike(f'%{text_keyword}%'))\n if ingredient_keyword_list and len(ingredient_keyword_list) != 0:\n print('ingredients found')\n\n # for item in ingredient_keyword_list:\n # item.lower()\n\n # query = query.filter(func.lower(Ingredient.name).in_(ingredient_keyword_list))\n # query = query.join(Ingredient.used).group_by(Recipe.id)\n\n # for ingredient in ingredient_keyword_list:\n # query_tmp = query.filter(Ingredient.name.ilike(f'%{ingredient}%'))\n # print(ingredient)\n # # query = query.filter(Ingredient.name.ilike(f'%{ingredient}%'))\n # query = query.intersect(query_tmp)\n # print(query.all())\n # print(query_tmp.all())\n\n # order the query\n if order_keyword:\n\n if order_keyword 
== 'max_ing':\n \"\"\"\n filter recipes by ingredients used\n\n select recipe_id, count(ingredient_id) from ingredients_used\n group by recipe_id\n order by count(ingredient_id) <desc - asc>\n \"\"\"\n query = query.order_by(desc(func.count(Ingredient.id))) \n else:\n query = query.order_by(func.count(Ingredient.id))\n\n\n query = query.all()\n if len(query) == 0:\n return jsonify({'message': 'No recipes found'}), 204\n\n output = []\n for recipe in query:\n recipe_obj = {}\n recipe_obj['name'] = recipe.name\n recipe_obj['average_rating'] = recipe.average_rating\n recipe_obj['text'] = recipe.text\n recipe_obj['author'] = recipe.author.email\n recipe_obj['ingredients'] = []\n\n for ingredient in recipe.used:\n recipe_obj['ingredients'].append(ingredient.name)\n \n output.append(recipe_obj)\n\n return jsonify({'message': 'Enjoy these recipes :)', 'data': output})", "def filt(item):\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result", "def get_recipe_titles(self, user, title):\n user_recipes = []\n for recipe in self.__recipe:\n if recipe['created_by'] == user and recipe['title'] == title:\n user_recipes.append(recipe)\n return user_recipes", "def test_filter_recipes_by_ingredients(self):\n recipe1 = sample_recipe(self.user, title='Waxy box')\n recipe2 = sample_recipe(self.user, title='Waxy noodle beans')\n ingredient1 = sample_ingredient(self.user, name='box')\n ingredient2 = sample_ingredient(self.user, name='Wet noodle')\n recipe1.ingredients.add(ingredient1)\n recipe2.ingredients.add(ingredient2)\n recipe3 = sample_recipe(self.user)\n\n res = self.client.get(\n RECIPE_URL,\n {'ingredients': f'{ingredient1.id},{ingredient2.id}'}\n )\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)", "def Cook(self, env, customer, cooking_time_type = 'fixed', manual_cook_time = None):\n with self.resource.request() as req:\n yield req #resource를 점유 해야 함.\n now_time = round(env.now , 1)\n req.info = [customer.name, now_time]\n if cooking_time_type == 'fixed':\n cooking_time = self.order_ready_time\n elif cooking_time_type == 'random':\n cooking_time = random.randrange(1,self.order_ready_time)\n elif cooking_time_type == 'uncertainty':\n cooking_time = customer.cook_time\n else:\n cooking_time = 0.001\n print('T :{} 가게 {}, {} 분 후 주문 {} 조리 완료'.format(int(env.now),self.name,cooking_time,customer.name))\n if manual_cook_time == None:\n yield env.timeout(cooking_time)\n else:\n yield env.timeout(manual_cook_time)\n print('T :{} 가게 {} 주문 {} 완료'.format(int(env.now),self.name,customer.name))\n customer.food_ready = True\n customer.ready_time = env.now\n self.ready_order.append(customer)", "def getRecipesByUser(cls, user=None):\n\n recipes = (db.session.query(Recipe).join(RecipeUser).\\\n filter(Recipe.recipe_id == RecipeUser.recipe_fk).\\\n filter(RecipeUser.user_fk == user).all())\n\n return recipes", "def get_filtered_recipes(request):\n page_number = 1\n\n if not request.GET:\n # If GET data is empty, assume no filters and return list of all recipes\n recipes = Recipe.objects.all().order_by('published_date')\n else:\n get_data = 
request.GET.dict()\n\n page_number = get_data.get('page', None)\n if page_number is None:\n raise Http404\n\n sorting = request.GET.get('sorting', 'published_date')\n\n possible_sortings = ['up_votes', 'published_date', 'title']\n if sorting not in possible_sortings:\n raise Http404\n\n if sorting == 'up_votes':\n sorting = '-up_votes'\n\n get_data.pop('page')\n get_data.pop('sorting')\n # If GET data is not empty, choose only recipes that match passed ingredients\n ingredients = []\n for pk in get_data:\n ingredient = Ingredient.objects.get(pk=pk)\n ingredients.append(ingredient)\n\n # Filter the recipes, gets all the recipes that match at least 1 element\n recipes_all = Recipe.objects.filter(ingredients__in=ingredients).order_by(sorting)\n # Remove duplicates\n unique_recipes = set()\n recipes = [r for r in recipes_all if not (r in unique_recipes or unique_recipes.add(r))]\n\n # Paginate, throws 404 on error\n context = do_pagination(recipes, NUMBER_OF_ELEMENTS_ON_PAGE, page_number)\n\n return HttpResponse(json.dumps(context), content_type='application/json')", "async def get_last_recommended_recipes(\n time_period: int = 3600, db_path: Path = DB_PATH\n) -> dict:\n recipes = await get_query_results(\n \"SELECT recipe_name, last_recommended FROM recipes\", db_path=db_path\n )\n\n current_time = int(time())\n cutoff_point = current_time - time_period\n\n recommended_recipes = []\n for recipe_name, last_recommended in recipes:\n if last_recommended > cutoff_point:\n recommended_recipes.append(recipe_name)\n\n recommended_recipes.sort()\n\n return {\"last_recommended_recipes\": recommended_recipes}" ]
[ "0.5769205", "0.572824", "0.5692194", "0.5493567", "0.54826933", "0.53807163", "0.53594685", "0.5334486", "0.52857834", "0.52453613", "0.5186571", "0.51753074", "0.5142366", "0.5119954", "0.5114708", "0.5108634", "0.5098999", "0.50934833", "0.5086736", "0.5082143", "0.50692123", "0.5060681", "0.5010046", "0.5005328", "0.49883413", "0.49821365", "0.49700812", "0.49648893", "0.4963236", "0.49477395" ]
0.6221729
0
Flag and remove the recipes that contain one or more ingredients that the user is allergic to.
def remove_recipes_with_allergies(df, user): if len(user.allergies) == 0: # User has no allergies - do not need to remove recipes return df allergies = [a + '_allergic' for a in user.allergies] return df.loc[df[allergies].any(1) == False]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_non_ingredient(ingredient_list):\n stop_words = set(stopwords.words('english'))\n \n filtered_list = []\n add_list = 0 #a dummy variable to add a text to filtered list\n for phrases in set(ingredient_list): #run through only one item in set (removes duplicates)\n\n for word in phrases:\n if word in stop_words:\n phrases.replace(word,'')\n\n #if one of the word in a phrase is ingredient, counts in to list\n for word in word_tokenize(phrases): #phrases can be phrase (run through phrases)\n \n is_ingredient = is_it_ingredient(word) #returns true if a word is ingridient\n \n if is_ingredient == True:\n add_list = 1\n else:\n add_list = 0\n\n ##if one of the word in a phrase is ingredient, counts in to list\n if add_list == 1 :\n\n filtered_list.append(phrases.capitalize())\n add_list = 0 \n\n return filtered_list", "def filter():\n ingredient = request.args.get(\"ingredient\")\n if ingredient == None: # no ingredient parameter was included in the request\n return Response(\n \"{\\\"error\\\":\\\"ingredient parameter is required\\\"}\",\n status=400,\n mimetype=\"application/json\")\n\n recipes = [\n recipe.to_json_dict()\n for recipe in recipebook.recipes\n if recipe.has_ingredient(ingredient)]\n\n return Response(\n json.dumps(recipes),\n mimetype=\"application/json\")", "def clean_ingredients(self):\n ingredients = self.cleaned_data['ingredients']\n if len(ingredients) < 1:\n v_err('no_ing')\n return ingredients", "def _strip_excess_data(recipe):\n for key in list(recipe.keys()):\n if key == \"ingredients\" or key == \"steps\":\n continue\n elif \"ingredient\" in key or \"step\" in key:\n del recipe[key]\n\n return recipe", "def get_beer_ingredients(beer):\n beer_ingredients = []\n for ing in beer['ingredients']:\n for item in beer['ingredients'][ing]:\n if 'name' in item:\n if item['name'] not in beer_ingredients:\n beer_ingredients.append(item['name'])\n\n return beer_ingredients", "def compute_allergens(foods):\n\n # Create a dictionary mapping allergens to lists\n # of ingredients that may contain that allergen\n allergen_foods = {}\n for ingredients, allergens in foods:\n for allergen in allergens:\n allergen_foods.setdefault(allergen, []).append(set(ingredients))\n\n # For each allergen, compute the intersection of the lists\n # computed above. 
This will give us the set of ingredienta\n # that could contain that allergen\n candidate_ingredients = {}\n for allergen in allergen_foods:\n candidate_ingredients[allergen] = set.intersection(*allergen_foods[allergen])\n\n # Repeatedly find an allergen that can only be matched to a single\n # ingredient, and remove that ingredient from the list of candidate\n # ingredients for all the other allergens.\n allergens = {}\n while len(candidate_ingredients) > 0:\n\n for single_allergen, cings in candidate_ingredients.items():\n if len(cings) == 1:\n ingredient = cings.pop()\n allergens[single_allergen] = ingredient\n break\n\n del candidate_ingredients[single_allergen] \n\n for allergen in candidate_ingredients:\n if allergen != single_allergen:\n ingredient = allergens[single_allergen]\n candidate_ingredients[allergen].discard(ingredient)\n\n return allergens", "def remove_wearables_without_diary(self):\n mark_for_removal = []\n for wearable in self.wearables.values():\n if wearable.diary is None:\n mark_for_removal.append(wearable.get_pid())\n\n for pid in mark_for_removal:\n print(\"Removing wearable %s.\" % pid)\n self.remove_wearable(pid)\n\n return len(mark_for_removal)", "def delete_old_recipe_ingredients(cls, recipeid):\n\n deleted_ingredients = Ingredient.query.filter_by(recipe_id=recipeid).delete()\n\n return deleted_ingredients", "def check_recipes(self):\n\n self.recipe = None\n\n for recipe in all_recipes:\n if recipe.matches(self.crafting, self.crafting_stride):\n self.recipe = recipe", "def test_excludeIngredientQuery(self) -> None:\n ingredient0 = 'multimedia'\n ingredient1 = 'provision'\n result = self.entries.exclude(Q(ingredients__icontains=ingredient0) | Q(ingredients__icontains=ingredient1))\n self.assertEqual(988, len(result))\n\n queries = (Q(ingredients__icontains=ingredient0), Q(ingredients__icontains=ingredient1))\n result = self.entries.exclude(functools.reduce(operator.or_, queries))\n self.assertEqual(988, len(result))", "def is_it_ingredient(word):\n reject_synsets = ['meal.n.01', 'meal.n.02', 'dish.n.02', 'vitamin.n.01']\n reject_synsets = set(wordnet.synset(w) for w in reject_synsets)\n accept_synsets = ['food.n.01', 'food.n.02']\n accept_synsets = set(wordnet.synset(w) for w in accept_synsets)\n for word_synset in wordnet.synsets(word, wordnet.NOUN):\n all_synsets = set(word_synset.closure(lambda s: s.hypernyms()))\n all_synsets.add(word_synset)\n for synset in reject_synsets:\n if synset in all_synsets:\n return False\n for synset in accept_synsets:\n if synset in all_synsets:\n return True", "def clear_ga_vasp_ingredient(self, ingred_name=\"\", new_seed_file=\"\"):\n if ingred_name == \"\":\n self.logger.error(\"Needs an ingredient name!\")\n return None\n fullpath = os.path.join(os.path.dirname(self.keywords['name']), ingred_name)\n self.logger.info(\"Removing directories and files from ingredient specified at %s\" % fullpath)\n dircontents = os.listdir(fullpath)\n subfolders = list()\n import shutil\n for diritem in dircontents:\n fulldir = os.path.join(fullpath,diritem)\n if os.path.isdir(fulldir) and diritem.isdigit():\n subfolders.append(fulldir)\n for subfolder in subfolders:\n shutil.rmtree(subfolder)\n files_to_remove = list()\n files_to_remove.append(\"starting.xyz\")\n for filename in files_to_remove:\n os.remove(os.path.join(fullpath, filename))\n shutil.copy(os.path.join(self.keywords['name'], new_seed_file), os.path.join(fullpath, \"starting.xyz\"))\n from MAST.ingredients import BaseIngredient\n cleared_ing = BaseIngredient(name=fullpath)\n 
cleared_ing.change_my_status(\"W\")\n return", "def solve_part1(start):\n all_ilists = load_inputs()\n\n allergen_map = get_allergen_map(all_ilists)\n all_ingredients = get_all_ingredients(all_ilists)\n\n all_potential_bad_ingredients = set()\n\n for l in allergen_map.values():\n all_potential_bad_ingredients.update(l)\n\n safe_ingredients = [a for a in all_ingredients if a not in all_potential_bad_ingredients]\n\n safe_ingred_count = 0\n for ilist in all_ilists:\n this_ingredients = ilist.get_ingredients()\n this_safe_ingredients = [a for a in this_ingredients if a in safe_ingredients]\n safe_ingred_count += len(this_safe_ingredients)\n\n return safe_ingred_count", "def test_filter_recipes_by_ingredients(self):\n recipe1 = sample_recipe(self.user, title='Waxy box')\n recipe2 = sample_recipe(self.user, title='Waxy noodle beans')\n ingredient1 = sample_ingredient(self.user, name='box')\n ingredient2 = sample_ingredient(self.user, name='Wet noodle')\n recipe1.ingredients.add(ingredient1)\n recipe2.ingredients.add(ingredient2)\n recipe3 = sample_recipe(self.user)\n\n res = self.client.get(\n RECIPE_URL,\n {'ingredients': f'{ingredient1.id},{ingredient2.id}'}\n )\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n self.assertIn(serializer1.data, res.data)\n self.assertIn(serializer2.data, res.data)\n self.assertNotIn(serializer3.data, res.data)", "def test_filtered_ingredients_unique(self):\n ing = Ingredient.objects.create(user=self.user, name='Eggs')\n Ingredient.objects.create(user=self.user, name='Lentils')\n recipe1 = Recipe.objects.create(\n title='Eggs Benedict',\n time_minutes=60,\n price=Decimal('7.00'),\n user=self.user,\n )\n recipe2 = Recipe.objects.create(\n title='Herb Eggs',\n time_minutes=20,\n price=Decimal('4.00'),\n user=self.user,\n )\n recipe1.ingredients.add(ing)\n recipe2.ingredients.add(ing)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n self.assertEqual(len(res.data), 1)", "def filter_output_dict(output_dict):\n global filter_ingredients\n if filter_ingredients:\n filtered_dict = {k: v for k, v in\n output_dict.iteritems() if\n all(filter_item in v['ingredients']\n for filter_item in filter_ingredients)}\n return filtered_dict\n else:\n return output_dict", "def test_filter_ingredients_assigned_to_recipes(self):\n in1 = Ingredient.objects.create(user=self.user, name='Apples')\n in2 = Ingredient.objects.create(user=self.user, name='Turkey')\n recipe = Recipe.objects.create(\n title='Apple Crumble',\n time_minutes=5,\n price=Decimal('4.50'),\n user=self.user,\n )\n recipe.ingredients.add(in1)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n s1 = IngredientSerializer(in1)\n s2 = IngredientSerializer(in2)\n self.assertIn(s1.data, res.data)\n self.assertNotIn(s2.data, res.data)", "def test_search_recipes_by_ingredients(self):\n pass", "def level_zero(self):\n self.new_ingredients = self.current_recipe.toppings", "def removing_ingridients(self, value):\n self._removing_ingridients = value\n if value:\n self._adding_ingridients = not value\n self._adding_meals = not value", "def test_filter_recipe_by_ingredient(self):\n recipe1 = sample_recipe(user=self.user, title=\"egg curry\")\n recipe2 = sample_recipe(user=self.user, title='dal curry')\n ingredient1 = sample_ingredient(user=self.user, name='egg')\n ingrident2 = sample_ingredient(user=self.user, name='dal')\n recipe1.ingredient.add(ingredient1)\n recipe2.ingredient.add(ingrident2)\n recipe3 = 
sample_recipe(user=self.user, title='potato curry')\n\n res =self.client.get(RECIPE_URL,\n {'ingredient': f'{ingredient1.id},{ingrident2.id}'})\n seralizer1 = Recipeserializer(recipe1)\n seralizer2 = Recipeserializer(recipe2)\n seralizer3 = Recipeserializer(recipe3)\n\n self.assertIn(seralizer1.data, res.data)\n self.assertIn(seralizer2.data, res.data)\n self.assertNotIn(seralizer3.data, res.data)", "def update_fridge (self,ingredient) :\n for ing in self.fridge :\n if ing.equals(ingredient) :\n qty_used = min(ing.quantity,ingredient.quantity)\n ing.set_quantity(ing.quantity - qty_used)", "def test_filter_recipe_by_ingredients(self):\n recipe1 = sample_recipe(user=self.user, title='chicken curry')\n recipe2 = sample_recipe(user=self.user, title='mutton curry')\n recipe3 = sample_recipe(user=self.user, title='milk dish')\n ing1 = sample_ingredient(user=self.user,name='chicken')\n ing2 = sample_ingredient(user=self.user,name='mutton')\n recipe1.ingredient.add(ing1)\n recipe2.ingredient.add(ing2)\n\n res = self.client.get(RECIPE_URL,{'ingredient':f'{ing1.id},{ing2.id}'})\n\n serializer1 = RecipeSerializer(recipe1)\n serializer2 = RecipeSerializer(recipe2)\n serializer3 = RecipeSerializer(recipe3)\n\n self.assertIn(serializer1.data,res.data)\n self.assertIn(serializer2.data,res.data)\n self.assertNotIn(serializer3.data,res.data)", "def get_ingred_exclusions(user_id):\n\n exclusions = ExcludedIngredient.query.filter_by(user_id=user_id).all()\n if exclusions:\n exclusion_list = []\n for exclusion in exclusions:\n exclusion_list.append(exclusion.ingred_name)\n\n return exclusion_list\n else:\n return None", "def pack_has_eaten(self, elk_to_eat):\n # Remove elk\n for elk in elk_to_eat:\n self.model.grid.remove_agent(elk)\n self.model.schedule.remove(elk)\n logging.debug('Pack has eated, disbanding pack with size {}'.format(\n len(self.wolves))\n )\n for wolf in self.wolves:\n wolf.energy += self.model.wolf_gain_from_food*len(elk_to_eat)\n wolf.kills += 1\n self.remove_from_pack(wolf)\n # Remove pack from scheduler\n self.model.grid.remove_agent(self)\n self.model.schedule.remove(self)", "def test_search_by_bad_ingredients(self):\n recipe_id = self.request_mgr.search_by_ingredients(['asdfadsfa'])\n self.assertEqual(recipe_id, None)", "def _check_if_ingredients_sufficient(self, drink_type: str, drink_details: Beverages) -> None:\n drink_composition = drink_details.get_receipe(drink_type=drink_type)\n for ingredient in drink_composition:\n if not self._inventory.check_if_ingredient_sufficient(\n ingredient=ingredient, quantity=drink_composition.get(\n ingredient, 0)\n ):\n raise InventoryInSufficient(\n inventory_type=ingredient, drink_type=drink_type\n )", "def get_ingredients(self):\n try:\n ingredients = self.soup.find_all(class_=[\"recipe-table\", \"table-list-header\"])\n ingredients_list = []\n for elem in ingredients:\n if elem.name == \"h4\" and elem.text.strip() != \"\":\n ingredients_list.append(\"\\n\\n\" + elem.text.strip() + \"\\n\\n\")\n elif elem.name == \"table\":\n rows = text_maker.handle(str(elem)).split(\"\\n\")\n rows = \"\\n\".join(\"* \" + r for r in rows if r.strip())\n ingredients_list.append(rows)\n self.ingredients = \"\".join(ingredients_list).strip()\n except Exception:\n current_app.logger.error(f\"Could not extract ingredients: {traceback.format_exc()}\")\n self.ingredients = \"\"", "def recipes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Recipe]:\n pass", "def test_search_by_ingredients(self):\n recipe_id = 
self.request_mgr.search_by_ingredients(['butter', 'sugar', 'eggs'])\n self.assertGreater(recipe_id, 0)" ]
[ "0.68335766", "0.6233659", "0.60058457", "0.5908703", "0.58015865", "0.5784165", "0.57681704", "0.5734267", "0.57105947", "0.5706079", "0.5674646", "0.5661127", "0.5651522", "0.5615922", "0.56022453", "0.55409044", "0.55026895", "0.5501584", "0.5482974", "0.54780596", "0.5468092", "0.5456578", "0.545608", "0.54367715", "0.5426194", "0.5413399", "0.5395561", "0.5387847", "0.53841096", "0.5382559" ]
0.6549054
1
Get list of unique tags in dataset (this is slow)
def get_unique_tags(df): tags = [] for index, row in df.iterrows(): tags = list(set(tags + ast.literal_eval(row.tags))) pdb.set_trace()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataset_tags(connection):\n assert connection\n query = \"\"\"select * from tags()\"\"\"\n result = sqlio.read_sql_query(query, connection)\n return [item.strip() for item in result['name']], [tag_id.strip() for tag_id in result['tag_id']]", "def get_unique_semantic_labels() -> Set[int]:\n idxs = set()\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n idxs.update([x.item() for x in t.unique(data[i][1])])\n return idxs", "def find_usefull_tags(tags, tagmodel, tag_count_vect):\n\n final_tags = []\n for tag in tags:\n if tag == None:\n continue\n else:\n tagpd = pd.Series(tag)\n tag_feature = tag_count_vect.transform(tagpd)\n result = tagmodel.predict(tag_feature)\n\n result = result.tolist() \n result = str(result)\n if result == '[1]':\n final_tags.append(tag)\n final_tags = list(dict.fromkeys(final_tags))\n return(final_tags)", "def get_all_tags(self, dataset: \"Dataset\") -> List[\"DatasetTag\"]:\n raise NotImplementedError", "def tag_set(self):\n tag_set = set()\n for tag_token in self.tags:\n tag_set.add(tag_token['tag'])\n return tag_set", "def tags(self):\n tag_docs = self.tag_data\n tags = set([x[\"tag\"] for x in tag_docs])\n # remove the \"thawed\" tag\n tags.discard(\"thawed\")\n return tags", "def get_tags(self):\r\n\r\n\r\n\r\n #using database\r\n\r\n if self.using_database:\r\n aprint('GET TAGS')\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT tag\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n\r\n #using shelf\r\n\r\n return self.tag_dict.keys()", "def get_user_list(dataset):\n res = dataset\\\n .map(lambda x: x[0])\\\n .collect()\n return list(set(res))", "def _get_tags(tempfile=TEMPFILE):\n with open(tempfile) as f:\n content = f.read().lower()\n # take a small subset to keep it performant\n tags = TAG_HTML.findall(content)\n tags = [tag for tag in tags if len(tag) > MIN_TAG_LEN]\n return set(tags)", "def all_tags(self):\n tags = set()\n query = self.sql_session.query(Feature).all()\n for tag in query:\n tags.add((tag.key, json.loads(tag.value)))\n return tags", "def list_all_tags(self,obs):", "def tags():", "def _tags(self):\n retval = []\n for of in self.tagnames:\n retval.append([of, self.get_datatype(of), self.get(of)])\n return retval", "def select_unique_ids(self):\n utk = self.metadata\n utk_ids = []\n for gg in set(utk['gender']):\n for rg in set(utk['race']):\n for ag in set(utk['age']):\n try:\n intersection_ids = list(utk[np.logical_and(utk['gender'] == gg,\n np.logical_and(utk['race'] == rg,\n utk['age'] == ag))]['filename'])\n if len(intersection_ids) <= CAP:\n utk_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n utk_ids += x\n\n except:\n continue\n self.unique_ids = utk_ids\n return utk_ids", "def get_node_tags(self, elem_tag):\n all_node_tag = np.array([], dtype=int)\n if np.size(elem_tag) > 1:\n for ie in range(len(elem_tag)):\n all_node_tag = np.concatenate(\n (all_node_tag, self.get_connectivity(elem_tag[ie]))\n )\n all_node_tag = np.unique(all_node_tag)\n else:\n all_node_tag = self.get_connectivity(elem_tag)\n\n return all_node_tag", "def get_tags(self) -> Set[Text]:\r\n return {tag for tag in self.tags}", "def get_all_tags_list(cls):\n all_tags_list = []\n # obj_list = cls.objects.filter(status=0).order_by('-update_time')\n obj_list = Article.objects.all()\n for obj in obj_list:\n all_tags_list = all_tags_list + 
obj.tags_list()\n # for tag in obj.tags.split(','):\n # all_tags_list.append(tag)\n return all_tags_list", "def get_all_attributes(dataset):\n return dataset.flatMap(lambda x: x.keys()).distinct().collect()", "def get_keys_for_tag(self,tag):\r\n\r\n #using database\r\n if self.using_database:\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT keyword\"\r\n +\" FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {tag[0] for tag in fetched}\r\n\r\n return set()\r\n #using shelf\r\n if self.using_shelf:\r\n if self.tag_dict_contains(tag):\r\n return self.tag_dict[tag]\r\n return set()", "def get_by_tag(cls, tag):\n out = []\n \n tags = Tag.expand_implied_by([tag])\n \n for t in tags:\n results = cls.objects.filter(owner=tag.owner, tags=t)\n \n for b in results:\n if b not in out:\n out.append(b)\n \n return out", "def get_unique_values(local_data, attr):\n\tvalues = []\n\tfor element in local_data:\n\t\tif element[attr] not in values:\n\t\t\tvalues.extend([element[attr]])\n\treturn values", "def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])", "def get_all_id_and_tags(self):\n return self.database.select(self.tname,\n [self.primary_key, 'tags'])", "def tag_strings(self):\n return [tag.tag_text for tag in self.tags.all()]", "def normalize_tags(tags):\n ret = []\n dupes = NormalizedDict({'': 1})\n for tag in tags:\n if not dupes.has_key(tag):\n ret.append(tag)\n dupes[tag] = 1\n ret.sort(lambda x, y: cmp(normalize(x), normalize(y)))\n return ret", "def unique_drugs(self):\n if self.results is not None:\n return tuple(self.results['drug'].unique())", "def tags(self) -> List[str]:\n return self._db_data.tags", "def get_interests_each_member(self, username):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n cursor.execute(\"SELECT tag FROM \" + ENV_DB + \".UserTags WHERE username='\" + username + \"'\")\r\n data = cursor.fetchall()\r\n database.close()\r\n return set([i[0] for i in data])", "def get_uniqueZIP(df):\n return df.ZIP.unique().tolist()", "def tags(self, tagmap=None):\n\n tags = self.find(\n 'guidle:classifications/'\n 'guidle:classification[@type=\"PRIMARY\"]/'\n 'guidle:tag'\n )\n tags = [tag.get('subcategoryName') or tag.get('name') for tag in tags]\n tags = set([tag for tag in tags if tag])\n if tagmap:\n return (\n {tagmap[tag] for tag in tags if tag in tagmap},\n tags - tagmap.keys()\n )\n return tags, set()" ]
[ "0.7043857", "0.68312377", "0.66289926", "0.66086257", "0.66025114", "0.65639853", "0.6363107", "0.63457936", "0.6314063", "0.62717646", "0.62587076", "0.6241535", "0.61744475", "0.61730134", "0.61709344", "0.61509275", "0.61432326", "0.6142673", "0.6115405", "0.6106224", "0.6100612", "0.6088263", "0.6042513", "0.60380244", "0.6015892", "0.60118437", "0.5968022", "0.59623474", "0.5937879", "0.5920417" ]
0.79654676
0
goes through all the reservation records that are due today and sends an email. An email should be sent by invoking a task to a worker pool
def notify(self): Reservation = self.db_con.table_data['reservations'] Restaurant = self.db_con.table_data['restaurants'] data = self.db_con.session.query(Reservation, Restaurant).\ filter(Reservation.restaurant_id == Restaurant._id).\ filter(Reservation.date == datetime.date.today()) for row in data: self.send_email(row.email, f'Your reservation at {row.name}', f'This is a reminder of your for ' f'location {row.address}, {row.time},' f'a table for {row.guests}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recs():\n click.echo(\"Emailing recommendations to destination...\")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")", "def to_do_fehrist_tasks_reminder():\n\n from todofehrist.models import Task, User\n from todofehrist.utility import send_email\n\n result = Task.objects.filter(\n completion_status=0, completion_datetime__date=date.today()).values(\"user\").annotate(\n count=Count(\"user\"))\n\n for user_tasks_entry in result:\n email_address = User.objects.get(pk=user_tasks_entry[\"user\"]).email\n send_email(\"ToDoFehrist - Pending Tasks Reminder\",\n f\"You have {user_tasks_entry['count']} pending tasks due today.\",\n email_address)\n\n logging.debug(f\"Reminder Email sent to user with email address {email_address}\")", "def notify_students():\n time_now = datetime.datetime.now(get_localzone())\n emails_to_send = Email.objects.all()\n for email in emails_to_send:\n if email.assignment.date_assigned <= time_now:\n send_mail(subject=email.subject,\n message=email.message,\n recipient_list=Student.objects.filter(assignments=email.assignment),\n from_email=None,\n fail_silently=False)\n email.delete()", "def execute_automatic_email(self, request, pk=None):\n try:\n retreat = Retreat.objects.get(pk=pk)\n except Exception:\n response_data = {\n 'detail': \"Retreat not found\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n email = AutomaticEmail.objects.get(\n id=int(request.GET.get('email'))\n )\n except Exception:\n response_data = {\n 'detail': \"AutomaticEmail not found\"\n }\n return Response(response_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(is_active=True):\n if reservation.automatic_email_logs.filter(email=email):\n pass\n else:\n send_automatic_email(reservation.user, retreat, email)\n AutomaticEmailLog.objects.create(\n reservation=reservation,\n email=email\n )\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in 
lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def delegate_last_day():\n\n regs = Registration.objects.all()\n\n template = 'notifications/last_day_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Schedule and other details'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='[email protected]')", "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def task_rescheduled_notify(name, attempts, last_error, date_time, task_name, task_params):\n body = loader.render_to_string(\n 'notification/email/notify_rescheduled_task.html', {\n 'name': name,\n 'attempts': attempts,\n 'last_error': last_error,\n 'date_time': date_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'task_name': task_name,\n 'task_params': task_params,\n 'signature': settings.EMAIL_SIGNATURE\n })\n subject = name + \" has been rescheduled\"\n mail_admins(subject, body, settings.DEFAULT_FROM_EMAIL)", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n 
task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks", "def run_now(self, now, sql, index):\n log.debug(\"Running notification %s: %s\", self.id, self.name)\n if not debug:\n sql.insert(\"UPDATE notification SET run=DATE(:now) WHERE notification_id=:notification_id\", notification_id=self.id, now=now)\n for trigger in self.triggers:\n if trigger.column is not None:\n for values in sql.selectAllDict(\"SELECT * FROM booking, user, enum, enum_entry WHERE DATE(:now) >= date(booking.{0}, '{1} DAYS') AND booking.user_id=user.user_id AND enum.field='user' AND enum.enum_id=enum_entry.enum_id AND enum_entry.value=user.user_id\".format(trigger.column, trigger.days), now=now):\n asset = index.get(values['asset_id'])\n if asset is None:\n log.debug(\"Asset with id %d no longer exists - skipping\", values['asset_id'])\n continue\n if trigger._filter(values, asset):\n yield self._mail(sql, values, asset)\n elif trigger.field is not None:\n if trigger.days >= 0:\n q = '{0}:[* TO {1}{2}DAYS]'.format(trigger.field, now.upper(), self._sign(-trigger.days))\n else:\n q = '{0}:[{1}{2}DAYS TO 
*]'.format(trigger.field, now.upper(), self._sign(trigger.days))\n #FIXME here and below it would be better to allow SOLR to do the filtering\n for asset in filter(lambda x: trigger._filter(None, x), index.search({'q': q, 'rows': 100000})['response']['docs']):\n yield self._mail(sql, None, asset)\n else:\n # it's a report, which means trigger all assets (satisfying filters), and group hits into one email\n assets = filter(lambda x: trigger._filter(None, x), index.search({'q': '*', 'rows': 100000})['response']['docs'])\n yield self._mail(sql, None, None, assets)", "def weekly_report(request):\n if TimeCheck().is_ready():\n # get the list of items for the email\n # this will include all active items with an expiration date\n # that occurs within the next 31 days\n exclude_date = dt.now() + datetime.timedelta(days=31)\n items = StockItem.objects\\\n .filter(active=True)\\\n .exclude(date_of_expiration__gt=exclude_date)\\\n .order_by('date_of_expiration')\n response = send_weekly_report(items)\n return HttpResponse(response.content)\n else:\n return HttpResponse('It is too soon to send another email.')", "def post(self, days):\n if int(days) <= 0:\n return 'Days can not be smaller than 0', 400\n cur_time = datetime.utcnow()\n expired_time = cur_time + timedelta(days=int(days))\n toSend = db.session.query(models.Order).filter_by(status=ORDER_STATUS_ACCPETED).filter(\n models.Order.expire >= cur_time,\n models.Order.expire <= expired_time)\n if toSend is None:\n return 'No reminders needed to be sent.', 200\n try:\n for record in toSend:\n email = query_user_by_name(record.borrower).email\n if email is None:\n continue\n book_copy = query_copy_by_id(record.copy)\n book = query_book_by_id(book_copy.book)\n EmailSender.send_email(email, record.borrower, book.title, record.expire)\n return 'Reminders sent successfully!', 201\n except Exception as e:\n return 'Error in sending reminders', 404", "def send_notifications():\n due_notifications = Notification.query.filter(Notification.delivery_date <= datetime.now(timezone.utc))\n for notification in due_notifications:\n send_notification.delay(notification.id)", "def send_email_week():\n\n cars_all = Car.objects.all()\n title_list = []\n today = now()\n for car in cars_all:\n if (today.day - car.created.day) > 7:\n new_car = car.title\n title_list.append(new_car)\n\n for item in Subscriber.objects.all():\n email_adress = item.email\n data = {\n 'email': email_adress,\n 'title': title_list,\n }\n email_body = render_to_string('main/email_add_ad.html', data)\n msg = EmailMultiAlternatives(subject='Обьявления машин', to=[email_adress, ])\n msg.attach_alternative(email_body, 'text/html')\n msg.send()", "def send_assignee_emails(self):\n\n assignees = list(set([obj.assignee for obj in self.stalled_nf_issues])) # Assignees from New Features\n assignees.extend(list(set([obj.assignee for obj in self.stalled_st_issues]))) # Add assignees from Sub-tasks\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n\n for assignee in assignees:\n assignee_issues = [] # List of IssueClass objects\n # Get all stalled New feature issues for this assignee\n for item in self.stalled_nf_issues + self.stalled_st_issues:\n if item.assignee == assignee:\n# if item.assignee == \"ashih\":\n assignee_issues.append(item)\n assignee_email = item.assignee_email\n \n if len(assignee_issues):\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_time_in_status_rows(assignee_issues)\n html_table += '</table>' # 
Closing table tag\n #recipients.append(assignee_email)\n print \"Sending email to: %s\" % recipients\n self.send_email(recipients, html_table, assignee)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def send_reminder(self):\n message_contents = \"This is a reminder that your event: \" + self.event_title + \" takes place on \" + self.event_date + \" in \" + self.event_location\n subject = \"Event Reminder\"\n attendees = self.gameplanuser_set.all()\n for attendee in attendees:\n remindermessage = Message.objects.create(sender=self.event_manager, recipient=attendee, contents=message_contents)\n remindermessage.save()", "def notify_users_of_reminders():\n\n #Get current date into dd/mm/YYYY format.\n now = datetime.datetime.now()\n todays_date = now.strftime(\"%d/%m/%Y\")\n\n #Get current time and convert it to hh:mm.\n todays_time = now.strftime(\"%H:%M\")\n print(todays_time)\n\n #Select all notifications from the database based on that date and time.\n notifications_query = \"\"\"SELECT user, reminder_msg FROM reminders WHERE (date=%s AND time=%s);\"\"\"\n\n #Setup our parameters\n notifications_params = (todays_date, todays_time)\n\n #TODO: Add in cursor.\n #TODO: Run query and get reminder data.\n #TODO: Loop over returned rows, and notify users with send_message_to_irc()", "def dryrecs():\n click.echo(\"Recommendations, not emailed: \")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n click.echo(recs_to_message(res, next_day))", "def test_changing_date(self):\n days = 2\n appt_date = datetime.date.today() + datetime.timedelta(days=days)\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router, days=days)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, self.unrelated_patient)\n self.stopRouter()", "def recap(self, request, pk=None):\n retreat = self.get_object()\n # This is a hard-coded limitation to allow anonymous users to call\n # the function.\n time_limit = retreat.end_time - timedelta(days=1)\n if timezone.now() < time_limit:\n response_data = {\n 'detail': \"Retreat ends in more than 1 day.\"\n }\n return Response(response_data, status=status.HTTP_200_OK)\n\n # Notify a user for every reserved seat\n emails = []\n for reservation in retreat.reservations.filter(\n is_active=True,\n post_event_send=False):\n send_post_retreat_email(reservation.user, retreat)\n reservation.post_event_send = True\n reservation.save()\n emails.append(reservation.user.email)\n\n response_data = {\n 'stop': True,\n 'emails': emails\n }\n return Response(response_data, status=status.HTTP_200_OK)", "def send_today_reminder(self, request, pk=None):\n try:\n today_menu = Menu.objects.get(\n menu_date=timezone.localtime(timezone.now()).date()\n )\n 
today_menu.send_today_menu_slack_each_user()\n except Menu.DoesNotExist:\n return Response({\"detail\": \"Not found.\"}, status=status.HTTP_404_NOT_FOUND)\n return Response(\n {\"detail\": \"Reminder sent successfully.\"}, status=status.HTTP_200_OK\n )", "async def send_tickets_available(self, event_id: int) -> str:\n async with self.pg.acquire() as conn:\n tickets_remaining = await conn.fetchval(\n 'SELECT check_tickets_remaining($1, $2)', event_id, self.settings.ticket_ttl\n )\n if tickets_remaining == 0:\n return 'no tickets remaining'\n\n user_ids = await conn.fetchval(\n \"\"\"\n select array_agg(user_id) from waiting_list\n where event=$1 and now() - last_notified > '1 day'\n \"\"\",\n event_id,\n )\n if not user_ids:\n return 'no users in waiting list'\n\n data = await conn.fetchrow(\n \"\"\"\n SELECT\n c.company AS company_id,\n e.name AS event_name,\n event_link(c.slug, e.slug, e.public, $2) AS event_link,\n e.start_ts > now() AS in_future\n FROM events AS e\n JOIN categories AS c ON e.category = c.id\n WHERE e.id=$1\n \"\"\",\n event_id,\n self.settings.auth_key,\n )\n if not data['in_future']:\n # don't send the email if the event is in the past\n return 'event in the past'\n\n await conn.execute_b(\n 'INSERT INTO actions (:values__names) VALUES :values',\n values=Values(company=data['company_id'], event=event_id, type=ActionTypes.email_waiting_list.value),\n )\n # do this before sending emails so even if something fails we don't send lots of emails\n await conn.execute(\n 'update waiting_list set last_notified=now() where event=$1 and user_id=any($2)', event_id, user_ids\n )\n ctx = {\n 'event_link': data['event_link'],\n 'event_name': data['event_name'],\n }\n\n def remove_link(user_id):\n return (\n f'/api/events/{event_id}/waiting-list/remove/{user_id}/'\n f'?sig={waiting_list_sig(event_id, user_id, self.settings)}'\n )\n\n users = [UserEmail(uid, {'remove_link': remove_link(uid), **ctx}) for uid in user_ids]\n await self.send_emails.direct(data['company_id'], Triggers.event_tickets_available, users)\n return f'emailed {len(user_ids)} users'", "def send_reminder(self):\n pass", "async def send_event_reminders(self):\n async with self.pg.acquire() as conn:\n # get events for which reminders need to be send\n events = await conn.fetch(\n \"\"\"\n SELECT\n e.id, e.name, e.short_description, e.start_ts, e.timezone, e.duration,\n e.location_name, e.location_lat, e.location_lng,\n cat.name AS cat_name, cat.slug AS cat_slug, cat.company AS company_id,\n event_link(cat.slug, e.slug, e.public, $1) AS event_link,\n full_name(uh.first_name, uh.last_name) AS host_name\n FROM events AS e\n JOIN users AS uh on e.host = uh.id\n JOIN categories AS cat ON e.category = cat.id\n WHERE e.status='published' AND\n e.start_ts BETWEEN now() AND now() + '24 hours'::interval AND\n e.id NOT IN (\n SELECT event\n FROM actions\n WHERE type='event-guest-reminder' AND\n ts > now() - '25 hours'::interval\n )\n ORDER BY cat.company\n \"\"\",\n self.settings.auth_key,\n )\n if not events:\n return 0\n # create the 'event-guest-reminder' action so the events won't receive multiple reminders\n await conn.execute_b(\n 'INSERT INTO actions (:values__names) VALUES :values',\n values=MultipleValues(\n *[\n Values(company=e['company_id'], event=e['id'], type=ActionTypes.event_guest_reminder.value)\n for e in events\n ]\n ),\n )\n # get all users expecting the email for all events\n r = await conn.fetch(\n \"\"\"\n SELECT DISTINCT event, user_id, id AS ticket_id\n FROM tickets\n WHERE status='booked' AND 
event=ANY($1)\n ORDER BY event\n \"\"\",\n {e['id'] for e in events},\n )\n # group the users by event\n users = {\n event_id: {(t['user_id'], t['ticket_id']) for t in g} for event_id, g in groupby(r, itemgetter('event'))\n }\n\n user_emails = 0\n for d in events:\n event_users = users.get(d['id'])\n if not event_users:\n continue\n start, duration = start_tz_duration(d)\n ctx = {\n 'event_link': d['event_link'],\n 'event_name': d['name'],\n 'host_name': d['host_name'],\n 'event_short_description': d['short_description'],\n 'event_start': format_dt(start),\n 'event_duration': format_duration(duration) if duration else 'All day',\n 'event_location': d['location_name'],\n 'category_name': d['cat_name'],\n is_cat(d['cat_slug']): True,\n }\n lat, lng = d['location_lat'], d['location_lng']\n if lat and lng:\n ctx.update(\n static_map=static_map_link(lat, lng, settings=self.settings),\n google_maps_url=f'https://www.google.com/maps/place/{lat},{lng}/@{lat},{lng},13z',\n )\n user_ctxs = [UserEmail(id=user_id, ctx=ctx, ticket_id=ticket_id) for user_id, ticket_id in event_users]\n await self.send_emails(\n d['company_id'], Triggers.event_reminder.value, user_ctxs, attached_event_id=d['id'],\n )\n user_emails += len(event_users)\n return user_emails", "def email_outstanding_fires(region_id=None):\n qs = Bushfire.objects.filter(report_status__in=[Bushfire.STATUS_INITIAL_AUTHORISED])\n rpt_date = datetime.now()\n\n for row in settings.OUTSTANDING_FIRES_EMAIL:\n for region_name,email_to in row.iteritems():\n\n try:\n region = Region.objects.get(name=region_name)\n except:\n region = None\n traceback.print_exc()\n\n if region:\n f = StringIO()\n book = Workbook()\n total_reports = outstanding_fires(book, region, qs, rpt_date)\n book.add_sheet('Sheet 2')\n book.save(f)\n\n if total_reports == 0:\n subject = 'Outstanding Fires Report - {} - {} - No Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n body = 'Outstanding Fires Report - {} - {} - No Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n elif total_reports == 1:\n subject = 'Outstanding Fires Report - {} - {} - 1 Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n body = 'Outstanding Fires Report - {} - {} - 1 Outstanding Fire'.format(region_name, rpt_date.strftime('%d-%b-%Y')) \n else:\n subject = 'Outstanding Fires Report - {} - {} - {} Outstanding Fires'.format(region_name, rpt_date.strftime('%d-%b-%Y'),total_reports) \n body = 'Outstanding Fires Report - {} - {} - {} Outstanding Fires'.format(region_name, rpt_date.strftime('%d-%b-%Y'),total_reports) \n\n message = EmailMessage(subject=subject, body=body, from_email=settings.FROM_EMAIL, to=email_to, cc=settings.CC_EMAIL, bcc=settings.BCC_EMAIL)\n if total_reports > 0:\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name.replace(' ', '').lower(), rpt_date.strftime('%d-%b-%Y'))\n message.attach(filename, f.getvalue(), \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\") #get the stream and set the correct mimetype\n\n message.send()", "def send_msg_scheduled_events():\n \n contact_all = Contact.objects.all()\n scheduled_events_all = ScheduledEvent.objects.all()\n\n connections_to_send = Connection.objects.none()\n\n for event in scheduled_events_all:\n connections_to_send = Connection.objects.none()\n for contact in contact_all:\n if (event.event_date - contact.date_of_birth).days >= event.days:\n contact_conn = Connection.objects.filter(contact=contact)\n connections_to_send = connections_to_send | 
contact_conn\n\n for conn in connections_to_send:\n send(event.msg_to_send, conn)", "def test_changing_date(self):\n days = 2\n appt_date = datetime.date.today() + datetime.timedelta(days=days)\n confirmed = self.create_confirmed_notification(self.test_patient,\n appt_date)\n unconfirmed = self.create_unconfirmed_notification(self.other_patient,\n appt_date)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router, days=days)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertPatientInMessage(message, self.test_patient)\n self.assertPatientInMessage(message, self.other_patient)\n self.assertPatientNotInMessage(message, self.unrelated_patient)", "def email_body_cancellation_from_buyer_within_24_hours(sellr_name, cost):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\"> You cancelled the appointment with <a href=\"#\" style=\"color:#1488CC\">' + sellr_name + '</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t We know life can be busy, but we also value accountability within the community and adhere to a <a href=\"#\" style=\"color:#1488CC\">24-hour cancellation policy</a>. You will be charged <a href=\"#\" style=\"color:#1488CC\">$' + str(cost) + '</a> for the service. <br><br>'\n\tmsg = msg + '\\t\\t\\t Questions? <a href=\"#\" style=\"color:#1488CC\">Drop us a line</a> or read our <a href=\"#\" style=\"color:#1488CC\">Terms of Service</a> and <a href=\"#\" style=\"color:#1488CC\">cancellation policies</a> for additional information. 
</font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg" ]
[ "0.6832567", "0.6828355", "0.67634314", "0.67304164", "0.66211", "0.65371364", "0.6534062", "0.65245", "0.6519359", "0.6350347", "0.6347726", "0.6204631", "0.6189868", "0.61646235", "0.61506426", "0.61425835", "0.61145645", "0.60711306", "0.6048367", "0.597328", "0.5964545", "0.59600276", "0.5956821", "0.59511924", "0.5925281", "0.5922691", "0.5914704", "0.59140795", "0.5908889", "0.58637166" ]
0.76157784
0
Process text using the same text processing procedure as was used in the DTM/TFIDF models, and recreate the length column with the cleaned text strings. This results in a more accurate length metric.
def process_length_in_place(flora_data_frame, tokenized_stop_words): before_process_length = flora_data_frame.text.apply(len) # Applying the same text processing used in the DTM/TFIDF models flora_data_frame.text = process_text_tokenize_detokenize(flora_data_frame.text, tokenized_stop_words) # Remove strings with no textual data flora_data_frame_no_empty = flora_data_frame[locate_empty_strings(flora_data_frame.text)] assert flora_data_frame_no_empty.shape[0] < flora_data_frame.shape[0], 'Rows with empty text strings not removed' after_process_length = flora_data_frame_no_empty.text.apply(len) assert sum(after_process_length) < sum(before_process_length), 'Text not processed' # Add new length data to data frame length_processed_flora_data_series = pd.concat( [flora_data_frame_no_empty.text, after_process_length.rename('length')], axis=1) flora_data_frame_no_empty = flora_data_frame_no_empty.drop(columns='length') flora_data_frame_no_empty = flora_data_frame_no_empty.drop(columns='text') flora_data_frame_no_empty = pd.concat([flora_data_frame_no_empty, length_processed_flora_data_series], axis=1) return flora_data_frame_no_empty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_text_length(row):\n derived_series = pd.read_json(json.dumps(row['text_derived']), typ='series')\n derived_series = pd.Series(derived_series)\n row[\"tweet_text_length_derived\"] = derived_series.str.len()\n return row[\"tweet_text_length_derived\"]", "def process_text(self, text):\n\n flags = (re.UNICODE if sys.version < '3' and type(text) is unicode # noqa: F821\n else 0)\n pattern = r\"\\w[\\w']*\" if self.min_word_length <= 1 else r\"\\w[\\w']+\"\n regexp = self.regexp if self.regexp is not None else pattern\n\n words = re.findall(regexp, text, flags)\n # remove 's\n words = [word[:-2] if word.lower().endswith(\"'s\") else word\n for word in words]\n # remove numbers\n if not self.include_numbers:\n words = [word for word in words if not word.isdigit()]\n # remove short words\n if self.min_word_length:\n words = [word for word in words if len(word) >= self.min_word_length]\n\n stopwords = set([i.lower() for i in self.stopwords])\n if self.collocations:\n word_counts = unigrams_and_bigrams(words, stopwords, self.normalize_plurals, self.collocation_threshold)\n else:\n # remove stopwords\n words = [word for word in words if word.lower() not in stopwords]\n word_counts, _ = process_tokens(words, self.normalize_plurals)\n\n return word_counts", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))", "def text_len(self, text):\n length = len(word_tokenize(text))\n return length", "def _text_length(self, text):\n\n if isinstance(text, dict): # {key: value} case\n return len(next(iter(text.values())))\n elif not hasattr(text, '__len__'): # Object has no len() method\n return 1\n elif len(text) == 0 or isinstance(text[0], int): # Empty string or list of ints\n return len(text)\n else:\n return sum([len(t) for t in text]) # Sum of length of individual strings", "def analyze_text (self, testing_string): \n self.length = len(self.testing_string)\n self.total_words = (self.testing_string).split()\n self.total_unique_words = set(self.total_words)\n\n self.total_characters = (int)(0)\n for ch in self.testing_string :\n if(ch.isspace() != True):\n self.total_characters = self.total_characters + 1 \n\n self.total_unique_characters = set(self.testing_string)\n \n Linguist.about_given_string[\"Length\"] = self.length\n Linguist.about_given_string[\"Total_words\"] = len(self.total_words)\n Linguist.about_given_string[\"Total_unique_words\"] = len(self.total_unique_words)\n Linguist.about_given_string[\"Total_characters\"] = self.total_characters\n Linguist.about_given_string[\"Total_unique_characters\"] = len(self.total_unique_characters)", "def get_text_width(self, text: str) -> float:\n pass", "def makeWordLengths(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if len(x) not in self.wordlengths: \r\n self.wordlengths[len(x)] = 1\r\n else: \r\n self.wordlengths[len(x)] += 1\r\n return self.wordlengths", "def LEN(text):\n return len(text)", "def getLength(self, text):\n\n return len(text[self.table_header[0]])", "def process_text(self, text: str, max_length: int) -> Dict[str, Sequence[int]]:\n inputs = self.tokenizer(\n [c for c in text],\n return_token_type_ids=True,\n return_attention_mask=True,\n max_length=max_length,\n padding=\"max_length\",\n truncation=True,\n is_pretokenized=True,\n )\n return inputs.data", "def preprocess_text(df, text_column_name=\"law_akn_text\"):\n words_with_quotes = get_words_with_quotes()\n\n # 
removing extra newlines from all the text\n df[text_column_name] = df[text_column_name].apply(\n lambda x: remove_extra_newlines(x))\n # replacing special double quotes with regular in all the text\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_quotes(x))\n # replacing special quote with regular in all the text\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_single_quote(x))\n # padding colon with spaces\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_dots(x))\n # padding semi-colon with spaces\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_dot_coma(x))\n # padding dot with spaces\n df[text_column_name] = df[text_column_name].apply(lambda x: replace_dot(x))\n # padding brackets with spaces\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_brackets(x))\n # padding commas with spaces\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_coma(x))\n # padding brackets with spaces\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_more_special_characters(x))\n # replacing all the quotes with a generic one\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_quotes_with_quotes_and_spaces(x))\n # padding words containing quotes with spaces\n df[text_column_name] = df[text_column_name].apply(\n lambda x: make_space_quotes(x, words_with_quotes))\n # removing extra whitespace\n df[text_column_name] = df[text_column_name].apply(\n lambda x: replace_extra_whitespaces_into_one(x))\n return df", "def analyse(self):\n logging.info(\"transferring text to CorpusCook...\")\n\n paragraphs = self.text.split('\\n\\n')\n print(\"mean length of splitted lines\", (mean([len(p) for p in paragraphs])))\n\n # If TIKA resolved '\\n'\n if (mean([len(p) for p in paragraphs])) > 80:\n paragraphs = [re.sub(r\"- *\\n\", '', p) for p in paragraphs]\n paragraphs = [p.replace('\\n', \" \") for p in paragraphs]\n paragraphs = [p.replace(';', \" \") for p in paragraphs]\n joiner = \" \"\n else:\n # If TIKA did not\n joiner = \" \"\n\n processed_text = joiner.join([p\n for p in paragraphs\n if\n p and\n ks_2samp(self.normal_data, list(p)).pvalue > self.threshold\n ]\n )\n\n return processed_text.strip()[:self.length_limit]", "def compute_user_description_text_length(row):\n row[\"user_description_text_length\"] = len(row['user_description'])\n return row[\"user_description_text_length\"]", "def makeSentenceLengths(self):\r\n count = 0\r\n LoW = self.text.split()\r\n list = []\r\n for x in range(len(LoW)): \r\n if '.' in LoW[x] or '?' in LoW[x] or '!' 
in LoW[x] : \r\n length = x\r\n list += [len(LoW[count: x+1])]\r\n count = length + 1\r\n for x in list:\r\n if x not in self.sentencelengths :\r\n self.sentencelengths[x] = 1\r\n else:\r\n self.sentencelengths[x] += 1", "def width(self, text):\n return len(text) * (self.font_width + 1)", "def preprocess(text):\n text = remove_space(text)\n text = clean_special_punctuations(text)\n text = handle_emojis(text)\n text = clean_number(text)\n text = spacing_punctuation(text)\n text = clean_repeat_words(text)\n text = remove_space(text)\n #text = stop(text)# if changing this, then chnage the dims \n #(not to be done yet as its effecting the embeddings..,we might be\n #loosing words)...\n return text", "def count_words(self, clean_func=clean_up):\n return (\n len(clean_func(self.transcript_file.text()).split())\n if self.validate()\n else 0\n )", "def calc_text_size(self, text, font):\n w = 0\n for c in text:\n o = ord(c)\n if o > 0xff: # Translate Cyrillic Unicode to ASCII\n o -= 848\n if o > 255:\n o = 32\n w += font.char_size(o)[1]\n return(w, font.height())", "def text_width(text):\n # Really crude guess would be: return len(text)/2\n return sum(GLYPH_WIDTHS.get(c, .5) for c in text)", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def preprocess(tmp_df, preprocess=False):\n\n # all in one go in order to just have to tokenize once\n if preprocess:\n tmp_df[\"description\"] = tmp_df[\"description\"].apply(\n clean_stop_punct_digit_n_lower)\n # words = tmp_df['description'] \\\n # .str.split(expand=True).stack().value_counts()\n # ratio = tmp_df['description'].apply(remove_duplicate)\\\n # .str.split(expand=True).stack().value_counts() \\\n # / tmp_df.shape[0]\n # words.to_csv('freq_words.csv')\n # ratio.to_csv(\"ratio.csv\")\n\n return tmp_df", "def post_process_text(self, text):\n\t\treturn text", "def parse_text(self, source):\r\n\r\n global word_set\r\n line_count = 0\r\n word_count = 0\r\n self.vowels = self.analyse_vowels(source)\r\n\r\n with open(source) as f:\r\n for line in f:\r\n # Detect end of paragraph\r\n if line_count and not line.strip() or line.startswith(\"\\t\"):\r\n self.paragraph_sizes.add(line_count)\r\n line_count = 0\r\n \r\n words = line.split()\r\n for word in words:\r\n if not word:\r\n continue\r\n self.word_sizes.add(len(word))\r\n construction = self.calculate_construction(word)\r\n self.word_constructions.add(construction)\r\n word_count += 1\r\n\r\n # Check if this is the end of a line.\r\n if word[-1] in self.ENDING_PUNCTUATION:\r\n line_count += 1\r\n self.sentence_sizes.add(word_count)\r\n word_count = 0\r\n\r\n \r\n if not self.paragraph_sizes.is_empty():\r\n # Liable to not parse in certain sources.\r\n self.paragraph_sizes = probabilities.PARAGRAPH_SIZES", "def count(text):\n return len(text)", "def process_text(document):\n return preprocess_string(document,\n filters=[strip_tags, 
strip_punctuation,\n strip_multiple_whitespaces,\n strip_numeric, remove_stopwords,\n strip_short]\n )", "def vectorize_text(df: pd.DataFrame):\n # Creating a stop_words list set that are common to many questions.\n common_phrases = [\n 'read the sentence from the passage',\n 'which of the following best describes',\n 'which is the best one sentence * for the section',\n 'which sentence from the passage provides the most evidence'\n 'select the sentence that does not support the central idea of the article',\n 'supports the main idea',\n 'select the paragraph from the section that explains how that shows the ',\n 'that is most relevant to be included in the summary of the article',\n 'according to the article',\n 'which of these is not one',\n ]\n stop_words = stopwords.words('english')\n [stop_words.extend(x.split()) for x in common_phrases]\n\n ct_vectorizer = CountVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n dtype='uint8')\n\n tfidf_vectorizer = TfidfVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n sublinear_tf=True, # Replace tf with 1 + log(tf).\n smooth_idf=True, # Default 1 doc for each term.\n dtype=np.float32)\n\n # Count & tf-idf vectorization learns vocab and transforms data into matrices.\n ct_vec = ct_vectorizer.fit_transform(np.array(df.text))\n tfidf = tfidf_vectorizer.fit_transform(np.array(df.text))\n # print(\"Shape of ct_vec:\", ct_vec.shape)\n # print('Size of ct_vec:', sys.getsizeof(ct_vec))\n # print(\"Shape of tfidf:\", tfidf.shape)\n # print('Size of tfidf:', sys.getsizeof(tfidf), '\\n')\n\n ct_names = ct_vectorizer.get_feature_names()\n tf_names = tfidf_vectorizer.get_feature_names()\n\n df_cv = pd.concat(\n [df, pd.DataFrame(ct_vec.toarray(), columns=ct_names)],\n axis=1)\n df_tfidf = pd.concat(\n [df, pd.DataFrame(tfidf.toarray(), columns=tf_names)],\n axis=1)\n\n return (\n df_cv,\n ct_vec,\n ct_names,\n df_tfidf,\n tfidf,\n tf_names\n )", "def cleanupText(path):\n \n text_cleaned = ''\n try:\n f = open(path)\n raw = f.read().lower()\n text = raw\n text_cleaned = text.translate(None, punctuation + digits)\n # print \"\\n Word count before:\" + str(len(text_translated.split())) + \"\\n\"\n # for stop in stop_word:\n # text_translated = text_translated.replace(stop,'')\n # print \"\\n Word count after:\" + str(len(text_translated.split())) + \"\\n\"\n text_cleaned = ' '.join([word for word in text_cleaned.split(' ') if (word and len(word) > 1)])\n \n finally:\n f.close()\n return text_cleaned", "def process_text(text: str, max_length: int, pad: bool = True) -> Tuple[List[str], List[str]]:\n input_unigrams = [c for c in text]\n text_bigrams = DatasetLSTM.compute_bigrams(text)\n input_bigrams = [c for c in text_bigrams]\n # cut to max len\n input_unigrams = input_unigrams[:max_length]\n input_bigrams = input_bigrams[:max_length]\n # pad sequences\n if pad and len(input_unigrams) < max_length:\n input_unigrams += [\"<PAD>\"] * (max_length - len(input_unigrams))\n if pad and len(input_bigrams) < max_length:\n input_bigrams += [\"<PAD>\"] * (max_length - len(input_bigrams))\n return input_unigrams, input_bigrams", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n 
sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat" ]
[ "0.64146006", "0.6411835", "0.61727315", "0.6159938", "0.615653", "0.6079194", "0.5983894", "0.5901329", "0.5890508", "0.5824049", "0.5790439", "0.5750861", "0.57246953", "0.5720506", "0.570198", "0.5676", "0.564766", "0.56303865", "0.55933934", "0.55636495", "0.5511339", "0.5498862", "0.54940075", "0.54669625", "0.5451984", "0.54320365", "0.5409892", "0.5409645", "0.5392661", "0.53883505" ]
0.6710242
0
Request the status of a message with the provided id and return a response dictionary. Returns a dictionary that contains a 'delivery' key with the status value string or contains 'errorCode' and 'message' on error.
def check_status(self, message_id): values = {'token': self._token, 'reference': message_id} return self._request(self.CHECK_STATUS_URL, values)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status(id):\n task = run_ctx_request.AsyncResult(id)\n if task.state == states.PENDING:\n abort(404)\n if task.state == states.RECEIVED or task.state == states.STARTED:\n return '', 202, {'Location': url_for('api.get_status', id=id)}\n return task.info", "def get_task_status(self, **kwargs):\n if kwargs is None or kwargs['parameters'] is None:\n message = \"For 'get_task_status' method parameters are not parsed.\"\n logger.critical(message)\n raise ValueError(message)\n\n if \"message_id\" not in kwargs['parameters']:\n message = \"Key 'message_id' not in kwargs.\"\n logger.critical(message)\n raise ValueError(message)\n\n message_id = kwargs['parameters']['message_id']\n\n return_data = {\"state\": \"Error\"}\n auth = self.authenticate()\n if auth == 200:\n task_completed = False\n state_message = \"Queued\"\n while not task_completed:\n sleep(WAIT_TIME_BETWEEN_REQUESTS)\n response = Utils.make_get_request(self.url(\"TaskInfo\" + \"/\" + str(message_id)),\n headers=self.request_header, verify=False)\n if 'StateMessage' in response.json():\n state_message = response.json()['StateMessage']\n if state_message == \"Success\" or state_message == \"Error\":\n task_completed = True\n return_data[\"state\"] = state_message\n if state_message == \"Success\":\n return_data[\"vm_id\"] = response.json()['Result']\n else:\n message = \"unable to authenticate to the PlatformA server,\" \\\n \" got the below response from server {}\".format(auth)\n logging.debug(message)\n raise Exception(message)\n\n return return_data", "def json_status_by_id(id):\n status = Status.query.filter(Status.id==id).first()\n if status is None:\n abort(404)\n return jsonify(status.get_public_dict())", "def get_order(self, order_id):\n try:\n self.ask_request()\n response = self._request(\n 'order/status', {'order_id': int(order_id)})\n order_status = response.json()\n except Exception as e:\n raise ExchangeRequestError(error=e)\n\n if 'message' in order_status:\n raise ExchangeRequestError(\n error='Unable to retrieve order status: {}'.format(\n order_status['message'])\n )\n return self._create_order(order_status)", "async def get_status(self, sms_id: int) -> SmsStatus:\n raise NotImplementedError", "def get_status_by_id(cls, request, id):\n return request.dbsession.query(cls).get(id).status", "def get_task_status(id):\n # obtain the task and validate it\n global background_tasks\n rv = background_tasks.get(id)\n if rv is None:\n return not_found(None)\n\n # if the task object is a Thread object that means that the task is still\n # running. 
In this case return the 202 status message again.\n if isinstance(rv, Thread):\n return jsonify({}), 202, {'Location': url_for('get_task_status', id=id)}\n\n # If the task object is not a Thread then it is assumed to be the response\n # of the finished task, so that is the response that is returned.\n # If the application is configured to auto-delete task status resources once\n # the task is done then the deletion happens now, if not the client is\n # expected to send a delete request.\n if app.config['AUTO_DELETE_BG_TASKS']:\n del background_tasks[id]\n return rv", "def get_queue_status(self, mailing_id):\n if getattr(self.settings, 'AK_TEST', False):\n return self.TEST_DATA.get('get_queue_status')\n res = self.client.get(\n #the '/' at the end is IMPORTANT!\n '%s/rest/v1/mailer/%s/progress/' % (self.base_url, mailing_id)\n )\n rv = {'res': res}\n if res.status_code == 200:\n res_dict = res.json()\n rv['status'] = res_dict.get('status', None)\n rv['finished'] = res_dict.get('finished', None)\n rv['progress'] = res_dict.get('progress', None)\n rv['target_count'] = res_dict.get('expected_send_count', None)\n rv['started_at'] = res_dict.get('started_at', None)\n return rv", "def get_message(self, id):\n url = \"https://api.imgur.com/3/message/{0}\".format(id)\n resp = self._send_request(url)\n return Message(resp, self)", "def patch(self, id):\n try:\n task = update_status(get_db(), id, Status[api.payload[\"status\"]])\n if not task:\n api.abort(404, \"Invalid Task\")\n return task_to_dict(task)\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response", "def show_message(self, message_id):\n url = 'messages/%s' % str(message_id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(schema.show_message, resp, body)\n return rest_client.ResponseBody(resp, body)", "def fetch_order_status(order_id: str):\n try:\n return EXCHANGE.fetch_order_status(order_id)\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n fetch_order_status(order_id)", "async def get_device_status(self, device_id: str) -> dict:\r\n return await self.get(API_DEVICE_STATUS.format(device_id=device_id))", "def get_response(self, msg):\n if msg.notification:\n return None\n elif msg.error:\n return (msg.error.status, \n self._build_error(msg.error, msg.message_id))\n elif msg.result:\n return (200, self._build_result(msg))\n else: # pragma: no cover\n # Should never be reached\n logging.warn('Message neither contains an error nor a result')", "def get(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n if state == states.STARTED:\n return { 'id':result_task.task_id, 'status': state }, 200\n # task still pending or unknown\n elif state == states.PENDING:\n return { 'id':result_task.task_id, 
'status': state }, 200\n elif state == states.SUCCESS:\n return { 'id':result_task.task_id, 'status': state }, 303, {'Location': api.url_for(MathJobResult,id=result_task.task_id)}\n else:\n return error(result_task)", "def message():\n # Retrieve JSON parameters data.\n data = request.get_json() or {}\n data.update(dict(request.values))\n msg = data.get(\"msg\")\n if not msg:\n raise abort(400, \"missing 'msg' data\")\n\n # Deffer the message as a task.\n result = tasks.process_message.delay(msg, delta=10)\n task_id = result.task_id\n if not task_id or result.failed():\n raise abort(400, \"task failed\")\n # Then check and return ID.\n return {\n \"task_id\": result.id\n }", "def status(self, job_id: str) -> dict:\n session = self._session()\n response = session.get(self._status_url(job_id))\n if response.ok:\n fields = [\n 'status', 'message', 'progress', 'createdAt', 'updatedAt', 'request',\n 'numInputGranules'\n ]\n status_subset = {k: v for k, v in response.json().items() if k in fields}\n return {\n 'status': status_subset['status'],\n 'message': status_subset['message'],\n 'progress': status_subset['progress'],\n 'created_at': dateutil.parser.parse(status_subset['createdAt']),\n 'updated_at': dateutil.parser.parse(status_subset['updatedAt']),\n 'request': status_subset['request'],\n 'num_input_granules': int(status_subset['numInputGranules']),\n }\n else:\n response.raise_for_status()", "def status_message(message):\n return StatusMessage(message)", "def get(self, id):\n return read_msg(id)", "def get_by_id(self, status_id: int) -> Status:\n return self.__mapper.map(\n self.__repository.get_by_id(status_id),\n Status\n )", "def get_status(self, build_id):\n url = f\"{self.base_url}/build\"\n payload = {\"build_id\": build_id}\n response = requests.get(url, json=payload, headers=self.headers)\n\n try:\n status = json.loads(response.text)\n except:\n status = response.text\n\n return status", "def request_status(job_id):\n status = _database_operations.get_status(job_id, Session())\n if status is None:\n flask.abort(404)\n else:\n return json.dumps({\n 'status': status.status,\n 'finished': status.finished\n })", "def set_task_delivered_by_id(task_id):\n task_dict = {}\n filter_task = TaskNotification.objects.filter(task_id=task_id).first()\n if filter_task:\n filter_task.status = 'delivered'\n filter_task.save()\n task_dict = {\n 'id': task_id,\n 'name': filter_task.name,\n 'status': filter_task.status,\n 'payload': filter_task.payload\n }\n return task_dict", "def get_message(self, message_id):\n req_data = [ str(message_id) ]\n return self.request(\"find:Message.stats, Message.content\", req_data)", "def msgStatus():\n return jsonify({\"status\": \"OK\"})", "def get_status(job_id):\n job = fetch_data.AsyncResult(job_id, app=app)\n return jsonify({'job_id': job_id, 'status': job.status})", "def fetch_email_status_by_message_id(cls, message_id: str):\n result = cls.mailjet_retrieve.messagehistory.get(id=message_id).json()\n if len(result[\"Data\"]) == 0:\n return None\n recent_event = result[\"Data\"][-1]\n return recent_event", "def get(self, id=None):\n\n\t\tif id:\n\t\t\tfor m in self.messages:\n\t\t\t\tif m[\"id\"] == id:\n\t\t\t\t\treturn(m)\n\n\t\t\tapi.abort(404, \"Message {} doesn't exist.\".format(id))\n\t\telse:\n\t\t\treturn(self.messages)", "def show(self, req, id):\n context = req.environ['manila.context']\n\n try:\n message = self.message_api.get(context, id)\n except exception.MessageNotFound as error:\n raise exc.HTTPNotFound(explanation=error.msg)\n\n return 
self._view_builder.detail(req, message)" ]
[ "0.66030747", "0.609507", "0.60182613", "0.60108596", "0.5971447", "0.5942305", "0.5873877", "0.58737046", "0.58452946", "0.58209074", "0.5817636", "0.57952654", "0.57667154", "0.57663524", "0.5756407", "0.57147986", "0.56973785", "0.5649888", "0.56432354", "0.56317407", "0.55976236", "0.55834115", "0.55528075", "0.5538122", "0.55356485", "0.55309683", "0.55284435", "0.551028", "0.5481058", "0.54385144" ]
0.616582
1
Returns a frame as a byte string of TIFF image data (or None). The byte string can be displayed with image(None, data=Camera.frame()).
def frame(self): try: AppHelper.runConsoleEventLoop(installInterrupt=True) return str(self._delegate.frame.representations()[0].TIFFRepresentation().bytes()) except: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFrame(self):\n s, image = self.capture.read()\n return image", "def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1", "def frame_data(self) -> str:\n pass", "def grabFrame(self):\r\n \r\n data, w, h, orientation = self.grabRawFrame()\r\n return Image.fromstring(\"RGB\", (w, h), data, \"raw\", \"BGR\", 0, orientation)", "def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)", "def get_frame(self):\n self._serial_port.close()\n self._serial_port.open()\n\n self._request_frame()\n\n serial_data = self._serial_port.readall()\n\n frame_start_idx = serial_data.find(BEGIN_FRAME) + len(BEGIN_FRAME)\n frame_end_idx = serial_data.find(END_FRAME)\n\n print serial_data[0:frame_start_idx]\n print serial_data[frame_end_idx:]\n\n raw_frame = serial_data[frame_start_idx:frame_end_idx]\n\n np_frame = np.fromstring(raw_frame, dtype=np.uint8)\n # np_frame = np_frame.reshape((30, 30))\n\n # image = cv2.fromarray(np_frame)\n\n # return image\n return np_frame", "def grab_frame(self):\n with self._buflock:\n if self._buffer is None:\n return None\n buf = self._buffer.tostring()\n return buf", "def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result", "def to_blob(self):\n x = cv2.dnn.blobFromImage(self.frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n return x", "def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]", "def _save_frame_as_png(\n self : \"animation\",\n frame : \"np.ndarray\",\n filename : \"str\"\n ):\n im = Image.fromarray(frame)\n im.save(filename)", "def get_frame(self,t):\n\n return pyfx.util.to_array(self._img_list[t],dtype=np.uint8,\n num_channels=4)", "def _get_single_frame(self, real_t: int, **kwargs) -> Image:\n if self._is_tiff:\n ret = self._reader.read(index=..., page=real_t, **kwargs)\n else:\n ret = self._reader.read(index=real_t, **kwargs)\n ret = ret.view(Image)\n ret.frame_no = real_t\n return ret", "def output_frame(self):\n if self._pipeline:\n frame = self._pipeline[-1].frame\n if not isinstance(frame, str):\n frame = frame.name\n return getattr(self, frame)\n else:\n return None", "def get_frame_by_frame(name=None, fps=4, write_to_disk=False, display_feed=False, on_capture=None):\n\n reset_camera()\n\n if name is None:\n name = \"fbf_\" + str(int(time()))\n \n dname = None\n if write_to_disk:\n chdir(cwd)\n dname = join(dirname(realpath(sys.argv[0])), \"train\", \"data\", name)\n if not exists(dname):\n print(\"Created dir: %s\" % dname)\n mkdir(dname)\n else:\n print(\"Using dir: %s\" % dname)\n else:\n print('Not writing to disk')\n\n def _snap(name, dname, write, display, capture_callback):\n global camera\n s, img = camera.read()\n\n if s and capture_callback:\n img = capture_callback(img)\n\n if s and display:\n cv2.imshow(name, img)\n cv2.waitKey(1) \n\n if write:\n chdir(dname)\n number_of_files = len([item for item in os.listdir(dname) if os.path.isfile(os.path.join(dname, item))])\n path = \"./\" + str(number_of_files + 1) + \".png\"\n if s:\n imwrite(path, img)\n print(\"Saved to 
\" + dname + \"/\" + str(number_of_files + 1) + \".png\")\n else:\n print(\"Could not read image %d from camera\" % (number_of_files + 1))\n chdir(cwd)\n\n return Timer(1 / fps, _snap, name, dname, write_to_disk, display_feed, on_capture).use_mp()", "def write_frame(self, data):\n try:\n cache_name = self.CACHE_FILE_NAME + str(time.time()) + '.jpg'\n file = open(cache_name, \"wb\")\n file.write(data)\n file.close()\n return cache_name\n except:\n return \"\"", "def captureimage(self):\n if not self.total_time:\n return self.frames[-1]\n return None", "def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg", "def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg", "def snapshot(self, components=4):\n fbo = self.fbo\n data = fbo.read(components=3)\n from PIL import Image\n return Image.frombytes('RGB', fbo.size, data).transpose(Image.FLIP_TOP_BOTTOM)", "def toString(self):\r\n if self.mesgType == MULTIPLEXER_FRAME_NOT_INIT:\r\n raise AttributeError, \"Frame is not yet initialized!\"\r\n \r\n # Create header\r\n frameHeader = MULTIPLEXER_FRAME_DIVIDER + str(self.mesgType) + MULTIPLEXER_FRAME_DIVIDER + str(self.contentLength) + \\\r\n MULTIPLEXER_FRAME_DIVIDER + str(self.referenceID) + MULTIPLEXER_FRAME_DIVIDER\r\n \r\n # Determine variable header size\r\n headerSize = str(len(frameHeader)).rjust(MULTIPLEXER_FRAME_HEADER_DIGITS,\"0\")\r\n \r\n if len(headerSize) > MULTIPLEXER_FRAME_HEADER_DIGITS:\r\n raise AttributeError, \"Frame Header too large! Max:\"+ MULTIPLEXER_FRAME_HEADER_DIGITS+ \" Actual:\"+ len(headerSize)\r\n \r\n return headerSize + frameHeader + self.content", "def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n \n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg", "def gen_testcamera(self, camera):\n cap = cv2.VideoCapture(camera)\n retval, frame = cap.read()\n cap.release()\n if retval:\n cv2.imwrite('t.jpg', frame)\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + open('t.jpg', 'rb').read() + b'\\r\\n')\n else:\n yield(\"Problem in camera\")", "def raw_data(self):\n return self.tif_file.raw_data()", "def gen(camera):\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + bytearray(frame) + b'\\r\\n')", "def gen(camera):\n track = Track()\n while True:\n frame = track.get_frame()\n ret, jpeg = cv2.imencode('.jpg', frame)\n frame = jpeg.tobytes()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen(camera):\n \n while True:\n \n \n \n frame = camera.get_frame()\n \n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def gen_frames(camera):\n while True:\n success, frame = camera.read()\n if not success:\n break\n else:\n ret, buffer = opencv.imencode('.jpg', frame)\n frame = buffer.tobytes()\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')", "def encodeFrame(frame):\n return base64.b64encode(frame)", "def convertFrame(self):\n try:\n img = QImage(self.currentVideoFrame,\n self.currentVideoFrame.shape[1],\n self.currentVideoFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None" ]
[ "0.6276596", "0.6158309", "0.60090196", "0.5957665", "0.58654284", "0.58341444", "0.5809219", "0.57665694", "0.5721019", "0.5706413", "0.56766975", "0.5670235", "0.56531984", "0.5624375", "0.55753624", "0.55488795", "0.55343914", "0.5529199", "0.5529199", "0.5523762", "0.55122685", "0.551128", "0.5509814", "0.5507487", "0.5496761", "0.5493704", "0.5476283", "0.5469981", "0.54598385", "0.5440655" ]
0.7610338
0
Decorator which checks the user is a prof before executing a view. Redirect to the index page if not.
def login_prof(func): @wraps(func, assigned=available_attrs(func)) def wrapper(request, *args, **kwargs): try: request.user.prof except ObjectDoesNotExist: return redirect('gradapp:dashboard_student') res = func(request, *args, **kwargs) return res return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def login_required(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None:\n app.logger.info('redirecting not logged in user')\n return redirect(url_for('index'))\n elif not g.user.initialized and f.__name__ not in ['profile_create','logout']:\n return redirect(url_for('profile_create'))\n else:\n return func(*args, **kwargs)\n return f", "def require_visitor(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if g.user:\n return redirect(url_for('site.home'))\n return func(*args, **kwargs)\n\n return decorator", "def requires_entrepreneur(func):\n def decorator(request, *args, **kwargs):\n if request.user.is_authenticated() and not request.user.is_entrepreneur():\n return redirect('dashboard')\n else:\n return func(request, *args, **kwargs)\n return decorator", "def login_success(request):\n if not hasattr(request.user, 'profile'):\n return redirect('index')\n else:\n return redirect('registration_process')", "def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"index\"))\n return f(*args, **kwargs)\n return decorated_function", "def process_view(request, view_func, view_args, view_kwargs):\n assert hasattr(request, \"user\")\n path = request.path_info\n url_is_exempt = any(url.match(path) for url in EXEMPT_URLS)\n if url_is_exempt:\n return None\n else:\n try:\n if request.session[\"user\"]:\n user = User(request.session[\"user\"])\n if user.is_valid() and user.check_key():\n return None\n else:\n return redirect(settings.LOGIN_URL)\n else:\n return redirect(settings.LOGIN_URL)\n except:\n return redirect(settings.LOGIN_URL)", "def student_restricted(function):\n @wraps(function)\n def decorated(*args, **kwargs):\n if is_setup():\n user = get_current_user()\n if user is not None and user.type == User.STUDENT:\n return function(*args, **kwargs)\n return redirect('/')\n return decorated", "def teacher_restricted(function):\n @wraps(function)\n def decorated(*args, **kwargs):\n if is_setup():\n user = get_current_user()\n if user is not None and user.type == User.TEACHER:\n return function(*args, **kwargs)\n return redirect('/')\n return decorated", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def __call__(self,request):\n if not request.user.is_anonymous:\n if not request.user.is_staff:\n profile = request.user.profile\n if not profile.picture or not profile.biography:\n if request.path not in [reverse('users:update'), reverse('users:logout')]:\n return redirect('users:update')\n response = self.get_response(request)\n return response", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if 
current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def _handle_view(self, name, **kwargs):\n if not self.is_accessible():\n if current_user.is_authenticated:\n # permission denied\n abort(403)\n else:\n # login\n return redirect(url_for('security.login', next=request.url))", "def admin_required(f):\n @functools.wraps(f)\n def wrapper(*a, **kw):\n if db_session.query(Admin).filter(Admin.id == current_user.id).first() is None:\n return redirect(url_for('index'))\n return f(*a, **kw)\n return wrapper", "def user_profile():\n if CURR_USER_KEY in session:\n return render_template('/profile/detail.html')\n else:\n return redirect('/login')", "def home(request):\n if 'member_id' not in request.session:\n return redirect(\"/login/\")\n return render(request, 'esihapp/index1.html')", "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))" ]
[ "0.6907077", "0.688773", "0.671351", "0.66562635", "0.6634365", "0.6625549", "0.66013575", "0.65863526", "0.6540508", "0.65245664", "0.65245664", "0.65245664", "0.64538014", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.64372694", "0.6382329", "0.6360428", "0.6348152", "0.6342058" ]
0.7059964
0
Evaluate the assignment (pk=assignment_pk) and make your evaluation a superevaluation. Assignment seen as eval__
def supereval_assignment(request, assignment_pk, i): assignment = Assignment.objects.get(id=assignment_pk) evalassignment = Evalassignment.objects.filter(assignment=assignment, is_supereval=True).first() redirect_url = ('/detail_assignmentype/%s/#assignment_%s' % (assignment.assignmentype.id, assignment.id)) if not evalassignment: evalassignment = Evalassignment(evaluator=request.user, assignment=assignment) evalassignment.is_supereval = True evalassignment.save() for iq in range(assignment.assignmentype.nb_questions): Evalquestion.objects.create(evalassignment=evalassignment, question=(iq + 1)) context = base_eval_assignment(request, evalassignment, i, '/supereval_assignment/%s/%s/' % (assignment_pk, i), redirect_url) if context: return render(request, 'gradapp/evalassignment_form.html', context) else: return redirect(redirect_url)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_evalassignment(request, pk, pts):\n student = request.user.student\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, assignment__student=student).first()\n if evalassignment:\n evalassignment.grade_evaluation = pts\n evalassignment.save()\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def set_eval(self):\n self.model.eval()", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n raise NotImplementedError", "def eval(self):\n raise NotImplemented()", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()", "def evaluation(self, evaluation):\n\n self._evaluation = evaluation", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def base_eval_assignment(request, evalassignment, i, url_action, url_cancel):\n error = ''\n EvalquestionFormSet =\\\n modelformset_factory(Evalquestion, extra=0,\n fields=['grade', 'comments'],\n widgets={'grade':\n forms.NumberInput(attrs={'min': 0,\n 'max': 2}),\n 'comments':\n forms.Textarea(attrs={'cols': 80,\n 'rows': 10})})\n qs = Evalquestion.objects.filter(evalassignment=evalassignment).\\\n order_by('question')\n if request.method == 'POST' and (evalassignment.assignment.assignmentype.\n deadline_grading >= timezone.now() or\n evalassignment.is_supereval):\n formset = EvalquestionFormSet(request.POST, queryset=qs)\n if formset.is_valid():\n formset.save()\n # if evaluation is modified, evaluation grade is reset\n evalassignment.grade_evaluation = 0\n evalassignment.is_questions_graded =\\\n (None not in [q.grade for q\n in evalassignment.evalquestion_set.all()])\n evalassignment.save()\n # set evalassignment.grade_assignment if question coeff exist\n log = tasks.compute_grade_evalassignment(evalassignment.id)\n logger.error(log)\n return None\n else:\n formset = EvalquestionFormSet(queryset=qs)\n if evalassignment.assignment.assignmentype.\\\n deadline_grading < timezone.now():\n error = 'Too late to grade or to modify your grading...'\n assignmentype = evalassignment.assignment.assignmentype\n if assignmentype.questions_statement:\n list_questions = [ '%s: %s' % (j + 1, s) for j, s in\n enumerate(assignmentype.questions_statement)]\n else:\n list_questions = [j for j in range(1, assignmentype.nb_questions + 1)]\n context = {'formset': zip(formset, list_questions),\n 'formset_management_form': formset.management_form,\n 'title': assignmentype.title,\n 'i': i,\n 'evalassignment_name': 'assign_%s_%s' %\n (assignmentype.title.replace(\" \", \"\"), i),\n 'description': assignmentype.description,\n 'evalassignment_id': evalassignment.id,\n 'deadline': assignmentype.deadline_grading,\n 'error': error,\n 'url_action': url_action,\n 'url_cancel': url_cancel}\n return context", "def evaluate(self):\n try:\n self._evaluate()\n except Exception as e:\n if str(e) == \"assignment destination is read-only\":\n log.exception(\n \"Encountered error during scenario evaluation. 
Be sure \"\n + \"that the classifier's predict() isn't directly modifying the \"\n + \"input variable itself, as this can cause unexpected behavior in ART.\"\n )\n else:\n log.exception(\"Encountered error during scenario evaluation.\")\n sys.exit(1)\n\n if self.results is None:\n log.warning(f\"{self._evaluate} did not set self.results to a dict\")\n\n self.save()", "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def eval_assignment(request, pk, i):\n evalassignment = Evalassignment.objects.filter(evaluator=request.user,\n pk=pk).first()\n if evalassignment and evalassignment.assignment.assignmentype.\\\n deadline_submission < timezone.now():\n # if evalassignment exists and if it is after the submission deadline\n context = base_eval_assignment(\n request, evalassignment, i,\n '/eval_assignment/%s/%s/' % (evalassignment.id, i),\n '/dashboard_student/')\n if context:\n return render(request, 'gradapp/evalassignment_form.html', context)\n else:\n return redirect('/dashboard_student/#assignment%s' %\n evalassignment.assignment.id)\n else:\n # if evalassignment does not exist or before submission deadline\n if evalassignment:\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def __assign_policy_def(self):\n\n self.logger.info(\n f\"Creating policy assignment of definition {self.policy_id} to assignment {self.assignment_id}\"\n )\n policy_assignment_res = self.interactor.put_policy_assignment(\n self.policy_id, self.assignment_id\n )\n\n if policy_assignment_res.status_code != 201:\n self.output_res[\"result\"][\"status\"] = \"ERROR\"\n self.output_res[\"result\"][\n \"message\"\n ] = f\"Policy assignment {self.assignment_id} could not be created - {policy_assignment_res.status_code}: {policy_assignment_res.text}\"\n\n self.running_evaluations[self.eval_id] = self.output_res\n return False\n\n return True", "def eval(self, logger=None):\n self.model.eval()\n self.model_DP.eval()\n logger.info(\"Successfully set the model eval mode\")", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def eval(cls, *args):\n raise NotImplementedError(\"subclasses need to override this method\")", "def isAssignment(self):\n return _libsbml.Rule_isAssignment(self)", "def eval_obj(self):\n if self._eval_obj is not self.null:\n return self._eval_obj\n else:\n evaled_args = [getattr(i, \"eval_obj\", i) for i in self._tuple[1:]]\n arg_grps = toolz.groupby(lambda x: isinstance(x, KwdPair), evaled_args)\n evaled_args = arg_grps.get(False, [])\n evaled_kwargs = arg_grps.get(True, [])\n\n op = self._tuple[0]\n op = getattr(op, \"eval_obj\", op)\n\n try:\n op_sig = inspect.signature(op)\n except ValueError:\n # This handles some builtin function types\n _eval_obj = op(*(evaled_args + [kw.value for kw in evaled_kwargs]))\n else:\n op_args = op_sig.bind(*evaled_args, **{kw.arg: kw.value for kw in evaled_kwargs})\n op_args.apply_defaults()\n\n _eval_obj = op(*op_args.args, **op_args.kwargs)\n\n # assert not isinstance(_eval_obj, ExpressionTuple)\n\n self._eval_obj = _eval_obj\n return self._eval_obj", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or 
(type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def eval(self, expr, locals):\r\n sav = self.locals_ptr\r\n self.locals_ptr = locals\r\n x = eval(self.compile(expr), {\"__builtins__\":self.eval_allowed_globals}, locals)\r\n self.locals_ptr = sav\r\n return x", "def createAssignmentRule(self):\n return _libsbml.Model_createAssignmentRule(self)", "def eval(self) -> typing.Any:\n return self.expr()", "def getAssignmentRule(self, *args):\n return _libsbml.Model_getAssignmentRule(self, *args)", "def post_assignment(data, session):\n\n try:\n assignment = ClassicalAssignment(**AssignmentSchema.load(data=data))\n except ValidationError as e:\n raise ValidationError(\n 'Error parsing followup request: ' f'\"{e.normalized_messages()}\"'\n )\n\n run_id = assignment.run_id\n data['priority'] = assignment.priority.name\n run = session.scalars(\n ObservingRun.select(session.user_or_token).where(ObservingRun.id == run_id)\n ).first()\n if run is None:\n raise ValueError('Observing run is not accessible.')\n\n predecessor = session.scalars(\n ClassicalAssignment.select(session.user_or_token).where(\n ClassicalAssignment.obj_id == assignment.obj_id,\n ClassicalAssignment.run_id == run_id,\n )\n ).first()\n\n if predecessor is not None:\n raise ValueError('Object is already assigned to this run.')\n\n assignment = ClassicalAssignment(**data)\n\n if hasattr(session.user_or_token, 'created_by'):\n user_id = session.user_or_token.created_by.id\n else:\n user_id = session.user_or_token.id\n\n assignment.requester_id = user_id\n assignment.last_modified_by_id = user_id\n session.add(assignment)\n session.commit()\n\n flow = Flow()\n flow.push(\n '*',\n \"skyportal/REFRESH_SOURCE\",\n payload={\"obj_key\": assignment.obj.internal_key},\n )\n flow.push(\n '*',\n \"skyportal/REFRESH_OBSERVING_RUN\",\n payload={\"run_id\": assignment.run_id},\n )\n return assignment.id", "def _should_eval(self):\n return False", "def set_models_eval(self):\n raise NotImplementedError" ]
[ "0.6541989", "0.6323441", "0.6086533", "0.5994296", "0.5994296", "0.5994296", "0.59660524", "0.5931263", "0.5913529", "0.5844344", "0.57308245", "0.572046", "0.5713599", "0.56698984", "0.56081396", "0.5587459", "0.55692595", "0.55667025", "0.5522861", "0.551115", "0.54968995", "0.5487504", "0.5485839", "0.54758257", "0.54668", "0.544493", "0.5437282", "0.5436824", "0.54279995", "0.54169476" ]
0.6349211
1
Evaluate the assignment evaluation (Evalassignment(pk=pk)). evalassignment.grade_evaluation = pts (-1, 0, +1)
def eval_evalassignment(request, pk, pts):\n    student = request.user.student\n    evalassignment = Evalassignment.objects.\\\n        filter(pk=pk, assignment__student=student).first()\n    if evalassignment:\n        evalassignment.grade_evaluation = pts\n        evalassignment.save()\n        redirect_item = '#assignment%s' % evalassignment.assignment.id\n    else:\n        redirect_item = ''\n    return redirect('/dashboard_student/' + redirect_item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_evaluated(evalassignment):\n if evalassignment.assignment.document.name == '' or evalassignment.\\\n assignment.assignmentype.deadline_submission > timezone.now():\n return -30\n else:\n if evalassignment.is_questions_graded:\n if evalassignment.grade_evaluation:\n return evalassignment.grade_evaluation\n else:\n return -10\n else:\n return -20", "def evaluate(self):\n try:\n self._evaluate()\n except Exception as e:\n if str(e) == \"assignment destination is read-only\":\n log.exception(\n \"Encountered error during scenario evaluation. Be sure \"\n + \"that the classifier's predict() isn't directly modifying the \"\n + \"input variable itself, as this can cause unexpected behavior in ART.\"\n )\n else:\n log.exception(\"Encountered error during scenario evaluation.\")\n sys.exit(1)\n\n if self.results is None:\n log.warning(f\"{self._evaluate} did not set self.results to a dict\")\n\n self.save()", "def evaluation( self ) :\n\n return( self.__evaluation )", "def evaluation(self):\n return self._evaluation", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def evaluate(self) :\n pass", "def scoreEvaluationFunction(currentGameState):\r\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()", "def scoreEvaluationFunction(currentGameState):\n return currentGameState.getScore()" ]
[ "0.6718955", "0.64347076", "0.6072748", "0.6022629", "0.5979578", "0.5979578", "0.5941105", "0.59408855", "0.593862", "0.593862", "0.593862", "0.593862", "0.593862", "0.593862", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771", "0.5919771" ]
0.76794904
0
Create an assignmentype or modify it (with new student list).
def create_assignmentype(request, assignmentype_id=None):\n    prof = request.user.prof\n    context = {}\n    if assignmentype_id:\n        assignmentype = Assignmentype.objects.get(id=assignmentype_id)\n        message = 'Reset your assignment. You can upload a new student list, '\\\n            'but be aware that it will reset the assignment (all former work '\\\n            'will be lost!)'\n        type_post = 'reset' # reset the assignmentype\n        context['assignmentype_id'] = assignmentype.id\n    else:\n        assignmentype = None\n        message = 'Create a new assignment!'\n        type_post = 'create' # new assignmentype\n    if request.method == 'POST':\n        form = AssignmentypeForm(request.POST, request.FILES,\n                                 instance=assignmentype)\n        if form.is_valid():\n            if (not assignmentype) and (Assignmentype.objects.filter(\n                    title=form.cleaned_data['title'])):\n                context['error'] = 'Oups, this assignment title has \\\n                    already been used, change it!'\n            else:\n                new_assignmentype = form.save(commit=False)\n                new_assignmentype.prof = prof\n                new_assignmentype.save()\n                # create folder where to upload assignments\n                try:\n                    os.mkdir(os.path.join(settings.BASE_DIR,\n                                          settings.MEDIA_ROOT, 'assignment_%s' %\n                                          new_assignmentype.id))\n                except FileExistsError:\n                    pass\n                # get list students from csv file\n                try:\n                    existing_students, new_students =\\\n                        tasks.get_students(new_assignmentype.list_students.path)\n                    # return page asking for agreement for creation of students\n                    request.session['existing_students'] = existing_students\n                    request.session['new_students'] = new_students\n                    request.session['assignmentype_pk'] = new_assignmentype.pk\n                    return redirect("gradapp:validate_assignmentype_students")\n                except Exception as e:\n                    logger.error(make_error_message(e))\n                    new_assignmentype.list_students = None\n                    new_assignmentype.save()\n                    # return update page of assignmentype\n                    return redirect('/reset_assignmentype/%s/' %\n                                    new_assignmentype.pk)\n    else:\n        form = AssignmentypeForm(instance=assignmentype)\n    context['message'] = message\n    context['form'] = form\n    context['type_post'] = type_post\n    return render(request, 'gradapp/assignmentype_form.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_assignmentype_students(request):\n existing_students = request.session.get('existing_students', False)\n new_students = request.session.get('new_students', False)\n assignmentype_pk = request.session.get('assignmentype_pk', False)\n if assignmentype_pk:\n tasks.create_assignment(assignmentype_pk,\n existing_students, new_students)\n return redirect('/detail_assignmentype/%s/' % assignmentype_pk)\n else:\n # TODO return error message\n return redirect('gradapp:index')", "def copy_and_add_student(self, new_student, happiness, stress):\n new_room = Room(self.rm_id)\n new_room.students = frozenset(list(self.students) + [new_student])\n new_room.stress = self.stress + stress\n new_room.happiness = self.happiness + happiness\n return new_room", "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def assign_mark(entry: StudentEntry):\n pass", "def role_assignment():\n\n # Create a dictionary of roles keyed by the role name.\n all_roles = {}\n\n try:\n roles = get_permissions_manager().policy_manager.policy_storage.all_roles()\n except PolicyStorageError, e:\n error(None, str(e))\n return\n\n for name, description in roles:\n all_roles[name] = Role(name=name, description=description)\n\n assignment = Assignment()\n view = _AssignmentView(all_roles)\n handler = _AssignmentHandler(all_roles=all_roles)\n\n assignment.edit_traits(view=view, handler=handler)", "def modify_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n if request.method == 'POST':\n form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n if form.is_valid():\n form.save()\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n else:\n form = LightAssignmentypeForm(instance=assignmentype)\n context = {}\n context['assignmentype_id'] = assignmentype.id\n context['message'] = 'Modify details of your assignment '\\\n '(keep current student list)'\n context['form'] = form\n context['type_post'] = 'modify'\n return render(request, 'gradapp/assignmentype_form.html', context)\n else:\n return redirect('gradapp:index')", "def create(self, validated_data):\n return Assignment.objects.create(**validated_data)", "def new_assignment(self, fname, lname, codename,\n assignment, duedate, duetime):\n return self._env.get_template('new_assignment.txt').render(\n fname=fname,\n lname=lname,\n codename=codename,\n assignment=assignment,\n duedate=duedate,\n duetime=duetime\n )", "def create_student(faculty: str) -> None:\r\n global usernames, pointer, student_file_info\r\n username = usernames[pointer]\r\n password = username[:6][::-1]\r\n student_file_info.append([username, password, faculty])\r\n pointer += 1", "def _enchance_assignment(self, doc):\n\n results = self.get_archive_items_for_assignment(doc)\n if results.count() > 0:\n doc['item_ids'] = [str(item.get(config.ID_FIELD)) for item in results]\n\n self.set_type(doc, doc)", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def add_an_assignment(cls):\n os.system('clear')\n while True:\n data = Ui.get_inputs(['Start date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'End date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'Assignment name\\n\\t'], \"Please provide the assignment details: \\n\")\n try:\n start_date_day = int(data[0])\n start_date_month = 
int(data[1])\n start_date_year = int(data[2])\n end_date_day = int(data[3])\n end_date_month = int(data[4])\n end_date_year = int(data[5])\n name_of_assign = str(data[6])\n except ValueError:\n Ui.print_message(\"\\nDate must be an integer!\\n\\n\")\n break\n\n if start_date_day > 31 or start_date_day < 1:\n Ui.print_message('\\nStart day value is incorrect')\n else:\n if start_date_month > 12 or start_date_month < 1:\n Ui.print_message('\\nStart month value is incorrect')\n else:\n if start_date_year > 9999 or start_date_year < 2000:\n Ui.print_message('\\nStart year value is incorrect')\n else:\n if end_date_day > 31 or end_date_day < 1:\n Ui.print_message('\\nEnd day value is incorrect')\n else:\n if end_date_month > 12 or end_date_month < 1:\n Ui.print_message('\\nEnd month value is incorrect')\n else:\n if end_date_year > 9999 or end_date_year < 1000:\n Ui.print_message('\\nEnd year value is incorrect')\n else:\n if len(name_of_assign) <= 1:\n Ui.print_message(\"\\nAssignment name have to be longer!\")\n else:\n list_of_names_of_assignments = []\n for i in Assignments.assignments_list:\n list_of_names_of_assignments.append(i.assignment_name)\n if name_of_assign in list_of_names_of_assignments:\n Ui.print_message(\"\\nAssignment name already exist, \"\n \"type another one!\")\n else:\n start_date = '{}-{}-{}'.format(start_date_year,\n start_date_month,\n start_date_day)\n end_date = '{}-{}-{}'.format(end_date_year,\n end_date_month,\n end_date_day)\n new_assignment = cls(start_date, end_date, name_of_assign)\n Assignments.assignments_list.append(new_assignment)\n Ui.print_message(\"\\nAssignment added!\\n\")\n Ui.get_inputs([''], \"Click enter to go back\")\n break # it stops the WHILE loop whenever passed information is incorrect, or assignment has been added", "def add_assignment(cls, mentor_id, title, start_date, end_date, file_name, group='0'):\n new = Assignment(mentor_id, title, start_date, end_date, file_name, group)\n db.session.add(new)\n db.session.commit()", "def assign_permissions(sender, instance, created, **kwargs):\n if created:\n assign_perm('view_strand', instance.owner.group, instance)\n assign_perm('change_strand', instance.saver, instance)\n assign_perm('delete_strand', instance.saver, instance)\n assign_perm('view_strand', instance.saver, instance)", "def set_assignment(self, updates, original=None):\n if not original:\n original = {}\n\n self.set_type(updates, original)\n\n if not updates.get('assigned_to'):\n if updates.get('priority'):\n # Priority was edited - nothing to set here\n return\n else:\n updates['assigned_to'] = {}\n\n assigned_to = updates.get('assigned_to') or {}\n if (assigned_to.get('user') or assigned_to.get('contact')) and not assigned_to.get('desk'):\n raise SuperdeskApiError.badRequestError(message=\"Assignment should have a desk.\")\n\n # set the assignment information\n user = get_user()\n if original.get('assigned_to', {}).get('desk') != assigned_to.get('desk'):\n if original.get('assigned_to', {}).get('state') in \\\n [ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS, ASSIGNMENT_WORKFLOW_STATE.SUBMITTED]:\n raise SuperdeskApiError.forbiddenError(\n message=\"Assignment linked to content. 
Desk reassignment not allowed.\")\n\n assigned_to['assigned_date_desk'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_desk'] = user.get(config.ID_FIELD)\n\n if assigned_to.get('user') and original.get('assigned_to', {}).get('user') != assigned_to.get('user'):\n assigned_to['assigned_date_user'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_user'] = user.get(config.ID_FIELD)\n\n if not original.get(config.ID_FIELD):\n updates['original_creator'] = str(user.get(config.ID_FIELD)) if user else None\n updates['assigned_to'][\n ITEM_STATE] = get_next_assignment_status(updates, updates['assigned_to'].get(ITEM_STATE) or\n ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)\n else:\n # In case user was removed\n if not assigned_to.get('user'):\n assigned_to['user'] = None\n else:\n # Moving from submitted to assigned after user assigned after desk submission\n if original.get('assigned_to')['state'] == ASSIGNMENT_WORKFLOW_STATE.SUBMITTED:\n updates['assigned_to']['state'] = get_next_assignment_status(updates,\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS)\n\n updates['version_creator'] = str(user.get(config.ID_FIELD)) if user else None", "def add_student(self, student):\n if student in self.students:\n raise ValueError('Duplicate Student.')\n self.students.append(student)\n self.grades[student.id] = []\n self.is_sorted = False", "def _save_clicked(self, info):\n\n assignment = self._validate(info)\n if assignment is None:\n return\n\n # Update the data in the database.\n try:\n get_permissions_manager().policy_manager.policy_storage.set_assignment(assignment.user_name, [r.name for r in assignment.roles])\n\n info.ui.dispose()\n except PolicyStorageError, e:\n self._ps_error(e)", "def validate_assignmentype_students(request):\n existing_students = request.session.get('existing_students', False)\n new_students = request.session.get('new_students', False)\n assignmentype_pk = request.session.get('assignmentype_pk', False)\n if assignmentype_pk:\n assignmentype = Assignmentype.objects.get(id=assignmentype_pk)\n return render(request, 'gradapp/validate_assignmentype_students.html',\n {'existing_students': existing_students,\n 'new_students': new_students,\n 'assignmentype': assignmentype})\n else:\n return redirect('gradapp:index')", "def editFromList(id=\"0\"):\n ## when creating a new record, g.countEventID will contain the ID of the countEvent record\n setExits()\n \n data = None\n if not data:\n data = request.form\n \n if \"ID\" in data:\n id = data[\"ID\"]\n \n id = cleanRecordID(id)\n if id < 0:\n flash(\"Invalid Record ID\")\n return redirect(g.listURL)\n \n locations = None\n if id == 0:\n if \"countEvent_ID\" in data:\n g.countEventID = data[\"countEvent_ID\"]\n \n ceID = cleanRecordID(g.countEventID)\n g.orgID = cleanRecordID(g.orgID)\n \n ## It's important to call fetchAll() or fetchOne() after executing sql this way or the\n ## database will be left in a locked state.\n \n sql = 'select ID,locationName from location where organization_ID = %d \\\n and ID not in \\\n (select location_ID from assignment where countEvent_ID = %d) \\\n order by locationName;' \\\n % (g.orgID, ceID)\n \n locations = db.engine.execute(sql).fetchall()\n if len(locations) == 0:\n return \"failure: There are no more Locations to use.\"\n \n \n rec = None\n if id > 0:\n rec = Assignment.query.get(id)\n if not rec:\n flash(printException(\"Could not edit that \"+g.title + \" record. 
(ID=\"+str(id)+\")\",'error'))\n return redirect(g.listURL)\n \n form = AssignmentEditFromListForm(data, rec)\n \n ## choices need to be assigned before rendering the form\n # AND before attempting to validate it\n form.user_ID.choices = getUserChoices() \n\n if request.method == \"POST\" and form.validate():\n if not rec:\n rec = createNewRecord(form.countEvent_ID.data)\n if not rec:\n return \"failure: Unable to create a new Assignment record\"\n \n rec.location_ID = form.location_ID.data\n rec.countEvent_ID = form.countEvent_ID.data\n rec.user_ID = form.user_ID.data\n try:\n db.session.commit()\n except Exception as e:\n printException(\"Unable to save Assignment from list\", \"error\", e)\n return \"failure: Sorry. Unable to save your changes.\"\n \n return \"success\" # the success function looks for this...\n \n \n assignedUserIDs = ()\n if rec: \n g.countEventID = int(rec.countEvent_ID)\n \n assignedUserIDs = getAssignedUsers(g.countEventID)\n return render_template('assignment/popupEditForm.html', \n form=form, \n locations=locations, \n assigned=assignedUserIDs, \n )", "def create(self, validated_data):\n tags = validated_data.pop(\"tags\", [])\n attachments = validated_data.pop(\"attachments\", [])\n request_user = validated_data.pop(\"request_user\") # this should always be there\n agenda_create = validated_data.pop(\"agenda_create\", None)\n agenda_type = validated_data.pop(\"agenda_type\", None)\n agenda_parent_id = validated_data.pop(\"agenda_parent_id\", None)\n\n assignment = Assignment(**validated_data)\n if has_perm(request_user, \"agenda.can_manage\"):\n assignment.agenda_item_update_information[\"create\"] = agenda_create\n assignment.agenda_item_update_information[\"type\"] = agenda_type\n assignment.agenda_item_update_information[\"parent_id\"] = agenda_parent_id\n\n assignment.save()\n assignment.tags.add(*tags)\n assignment.attachments.add(*attachments)\n inform_changed_data(assignment)\n return assignment", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def addStud(self,ID,name,attNr,grade):\n if ID < 0: raise Exception(\"Invalid ID!\")\n parts = name.split(' ')\n if len(parts) < 2: raise Exception('Invalid name!')\n for part in parts:\n if len(part)<3: raise Exception('Invalid name!')\n if attNr < 0: raise Exception('Invalid number of attendances!')\n if grade not in range(0,11): raise Exception('Invalid grade!')\n self.__studRepo.add(Student(ID,name,attNr,grade))", "def addStudent(self, student):\n if student in self.students:\n raise ValueError(\"Duplicate Student\")\n self.students.append(student)\n self.grades[student.getIDNumber()] = []\n self.isSorted = False", "def assign_default_role(course_id, user):\r\n role, __ = Role.objects.get_or_create(course_id=course_id, name=\"Student\")\r\n user.roles.add(role)", "def add_grades(self, subject_name, grade_list, attendance=True): \n\t\n\t\tif (isinstance(subject_name, str) and isinstance(grade_list, list)):\n\t\t\tfor grade in grade_list:\n\t\t\t\tself.grades.setdefault(subject_name, []).append(grade)\n\t\t\tself.attendance += 1 if attendance else 0", "def create_scaffold_assignments_table(self):\n log.info(\"Creating table to store Scaffold genus assignments ...\")\n self.create_table(self.ScaffoldsAssignmentsTable ,self.ScaffoldAssignmentsFields,\n self.ScaffoldAssignmentsTypes)", "def add_student():\n\n\tprint('You must enter the student as 
is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def editStudent(s, number):\n nname = input(\"New Name: \")\n nnumber = input(\"New Number: \")\n ngpa = input(\"New GPA: \")\n nfield = input(\"New Field: \")\n\n deleteStudent(s, number)\n student = Student(nname, nnumber, ngpa, nfield)\n if t.insert(nnumber, student):\n ht.insert(student)\n print(nname, \"edited successfully.\")\n else:\n print(\"new student number is not valid.\")", "def createAssignmentRule(self):\n return _libsbml.Model_createAssignmentRule(self)", "def __init__(self, name):\n self.name = name\n self.maxidy = -1\n self.studentlist = []" ]
[ "0.65984386", "0.57637066", "0.57263076", "0.56991005", "0.56055224", "0.5555415", "0.5539834", "0.55174875", "0.5380153", "0.537248", "0.5371831", "0.5370983", "0.53656816", "0.53413004", "0.5276961", "0.52551144", "0.52273005", "0.5212578", "0.5203159", "0.5169151", "0.516717", "0.51630324", "0.51397854", "0.5120233", "0.5117467", "0.51169986", "0.5113926", "0.51071024", "0.5102647", "0.50913996" ]
0.6075281
1
Insert a question for an assignmentype (pk=pk). The user enters in a form a question to be created (cd=1) or a question to be deleted (cd=-1)
def insert_question_assignmentype(request, pk, cd):\n    prof = request.user.prof\n    assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n    cd = int(cd)\n    if cd == 1:\n        classForm = AddQuestionForm\n        info = 'Add'\n    elif cd == -1:\n        classForm = RemoveQuestionForm\n        info = 'Remove'\n    if assignmentype:\n        if request.method == 'POST':\n            form = classForm(request.POST,\n                             nb_questions=assignmentype.nb_questions)\n            if form.is_valid():\n                question = form.cleaned_data['question']\n                # Modify attribute question of all associated evalquestion\n                if cd == -1:\n                    evalquestions = Evalquestion.objects.filter(\n                        evalassignment__assignment__assignmentype=assignmentype,\n                        question=question)\n                    evalquestions.delete()\n                evalquestions = Evalquestion.objects.filter(\n                    evalassignment__assignment__assignmentype=assignmentype,\n                    question__gte=question)\n                evalquestions.update(question=F('question') + cd)\n                # Create a new evalquestion for each evalassignment (if cd=1)\n                # and inform that it has to be graded\n                for evalassignment in Evalassignment.objects.filter(\n                        assignment__assignmentype=assignmentype):\n                    if cd == 1:\n                        Evalquestion.objects.create(\n                            evalassignment=evalassignment, question=question)\n                        evalassignment.reset_grade()\n                    elif cd == -1:\n                        evalassignment.grade_assignment = None\n                        evalassignment.save()\n                # Add a question to the assignmentype\n                assignmentype.nb_questions += cd\n                if cd == 1:\n                    if assignmentype.questions_coeff:\n                        assignmentype.questions_coeff.insert(question - 1, None)\n                    if assignmentype.questions_statement:\n                        assignmentype.questions_statement.insert(question - 1, None)\n                    assignmentype.save()\n                elif cd == -1:\n                    if assignmentype.questions_coeff:\n                        del assignmentype.questions_coeff[question - 1]\n                    if assignmentype.questions_statement:\n                        del assignmentype.questions_statement[question - 1]\n                    assignmentype.save()\n                log = tasks.compute_grades_assignmentype(assignmentype.pk)\n                logger.info(log)\n                return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n        form = classForm(nb_questions=assignmentype.nb_questions)\n        context = {'assignmentype': assignmentype, 'form': form, 'info': info,\n                   'cd': cd}\n        return render(request, 'gradapp/insert_question.html', context)\n    else:\n        return redirect('gradapp:index')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_question(self, id):\n cursor = self.conn.cursor()\n cursor.execute(f\"insert into {self.site} values (?)\", (id, ))\n self.conn.commit()\n cursor.close()", "def ask_question():\n title_question = request.form.get(\"title\")\n question = request.form.get(\"question\")\n\n date_string = datetime.today().strftime('%Y-%m-%d')\n \n ask = Question(user_id = session[\"user_id\"],question_created=date_string, title_question = title_question, question = question)\n\n db.session.add(ask)\n db.session.commit()\n\n return \"question added\"", "def inserir_questao():\n try:\n if current_user.is_administrator():\n categorias = Categoria.query.all()\n if request.method == 'POST':\n questao = Questao(\n cod_categoria = request.form['categoria'],\n cod_usuario = current_user.cod_usuario,\n questao = request.form['questao'],\n alternativa1 = request.form['alternativa1'],\n alternativa2 = request.form['alternativa2'],\n alternativa3 = request.form['alternativa3'],\n alternativa4 = request.form['alternativa4'],\n alternativa5 = request.form['alternativa5'],\n alternativa_correta = request.form['resposta']\n )\n db.session.add(questao)\n db.session.commit()\n return listar_questoes()\n return render_template('admin/inserir_questao.html', categorias=categorias)\n return redirect(url_for('main.index'))\n except Exception as e:\n abort(500, e)", "def save(self)->None:\n database.cursor.execute(\"INSERT INTO questions(created_date,created_by,meetup,title,body,votes,upvotes,downvotes) VALUES(%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.created_on,\n self.created_by,\n self.meet_up,\n self.title,\n self.body,\n self.votes,\n self.upvotes,\n self.downvotes\n ))\n super().save()", "def create_question():\n if request.content_type != \"application/json\":\n abort(415)\n question_text = request.json['question']\n answer = request.json['answer']\n difficulty = request.json['difficulty']\n category = request.json['category']\n\n question_object = Question(question_text, answer, category, difficulty)\n db.session.add(question_object)\n db.session.commit()\n return jsonify({\n \"success\": True\n }), 201", "def submit_question():\n body = request.get_json()\n\n question = body.get('question', None)\n answer = body.get('answer', None)\n difficulty = body.get('difficulty', None)\n category = body.get('category', None)\n\n try:\n\n new_question = Question(\n question=question,\n answer=answer,\n difficulty=difficulty,\n category=category\n )\n\n new_question.insert()\n\n return jsonify({\n 'success': True,\n 'created': new_question.id\n })\n\n except:\n abort(422)", "def add_question():\n data = request.get_json()\n question = data['question']\n answer = data['answer']\n difficulty = data['difficulty']\n category = data['category']\n for key, value in data.items():\n if not value:\n return jsonify({'success': False, 'error': 400,\n 'message': f'{key} field is missing a value'\n }), 400\n new_question = Question(question, answer, category, difficulty)\n new_question.insert()\n return jsonify({'success': True, 'message': 'Question was created',\n 'question': new_question.format()}), 201", "def add_teacher_data(connection,name,tsc_no,subjects,type_of_teacher):\r\n with connection:\r\n connection.execute(INSERT_TEACHER,(name,tsc_no,subjects,type_of_teacher))", "def test_create_new_question(self):\n response = self.client().post('/questions', json=self.new_question)\n body = json.loads(response.data)\n\n question = Question.query.filter_by(id=body['created']).one_or_none()\n\n 
self.assertEqual(response.status_code, 200)\n self.assertEqual(body['success'], True)\n self.assertIsNotNone(question)", "def add_question(self, question: str, question_type: int, answer: [str], manually_grading: bool, points: float,\n test_id: int) -> Optional[int]:\n try:\n new_question = Questions(question=question, question_type=question_type, answer=answer,\n manually_grading=manually_grading, points=points)\n self.session.add(new_question)\n self.session.commit()\n new_questions_tests = QuestionsTests(question_id=new_question.id, test_id=test_id)\n self.session.add(new_questions_tests)\n self.session.commit()\n return new_question.id\n except Exception as excpt:\n self.session.rollback()\n print(f'Couldn\\'t add question: {excpt}')\n return None", "def create_question():\n body = request.get_json()\n\n question_text = body.get('question', None)\n answer = body.get('answer', None)\n category = body.get('category', 1)\n difficulty = body.get('difficulty', 1)\n\n try:\n question = Question(question=question_text,\n answer=answer,\n category=category,\n difficulty=difficulty)\n question.insert()\n\n selection = Question.query.order_by(Question.id).all()\n current_questions = paginate_questions(request, selection)\n\n return jsonify({\n 'success': True,\n 'created': question.id,\n 'questions': current_questions,\n 'total_questions': len(selection)\n })\n\n except Exception:\n abort(422)", "def insert(self, teacher: Teacher):\n sql = f''' INSERT INTO {self.table_name}({','.join([f[0] for f in Teacher.FIELDS])})\n VALUES({('?,' * len(Teacher.FIELDS))[:-1]}) '''\n print(sql)\n teacher_dict = teacher.json_dump()\n print(teacher_dict)\n # assert 1==2\n self.cursor.execute(sql, teacher_dict)\n self.conn.commit()", "def test_create_question(self):\n res = self.client().post('/api/questions', json=self.new_question)\n res_body = json.loads(res.data)\n\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res_body['success'])\n self.assertTrue(res_body['created'])\n \n new_question = Question.query.filter(Question.id == res_body['created']).one_or_none()\n self.assertTrue(new_question)", "def add_post():\n\tt_id = db.survey.insert(\n\t\tquestion = request.vars.question,\n\t\tuser_email = request.vars.email,\n\t\tuser_name = get_user_name_from_email(request.vars.email),\n\t\topt1 = request.vars.opt1,\n\t\topt2 = request.vars.opt2,\n\t\topt3 = request.vars.opt3,\n\t\topt4 = request.vars.opt4,\n\t\t#created_on_human = humanize.naturaltime(datetime.datetime.utcnow()),\n\n\t)\n\tt = db.survey(t_id)\n\treturn response.json(dict(post=t))", "def new_from_post():\n # If you make a post request with a question_id we will assume you want a new question editor\n # we will prepopulate the question new page with data from that question (if it is a valid question id)\n question_id = request.form['question_id'] if request.form['question_id'] else ''\n\n return render_template('questionNew.html', question_id=question_id)", "def create_question(user,title='title',text='text'):\n return Question.objects.create(created_by=user, title=title, text=text)", "def test_add_question_invalid_type(self):\r\n print(\"Add question with invalid type\")\r\n q_text = \"Test 9999\"\r\n q_type = 5\r\n q_required = 0\r\n q_responses = [\"Yes\", \"No\"]\r\n\r\n prev_noQuestions = len(Question.query.all())\r\n self.assertEqual(self.system.add_question(q_text, q_type, q_required, q_responses), 0)\r\n curr_noQuestions = len(Question.query.all())\r\n self.assertEqual(prev_noQuestions, curr_noQuestions)", "def admincreate(object):\n 
if request.method == \"POST\":\n\n db = get_db()\n execute_string = 'INSERT INTO ' + object.title()\n\n if object == 'post':\n execute_string += '(title, content, authorId, categoryId) VALUES (\"' + request.form['title'] + '\", \"' + request.form[\"content\"] + '\", \"' + request.form[\"authorid\"] + '\", \"' + request.form[\"categoryid\"] + '\")'\n elif object == 'author':\n execute_string += '(name) VALUES (\"' + request.form['name'] + '\")'\n elif object == 'category':\n execute_string += '(name, description) VALUES (\"' + request.form['name'] + '\", \"' + request.form[\"description\"] + '\")'\n\n db.execute(execute_string)\n db.commit()\n return redirect(url_for(\"adminview\", object=object))\n\n return render_template(\"new.html\", object=object, item={})", "def create_question(self, input_title, input_details, user_id):\n try:\n query = (u\"INSERT INTO tbl_questions (question_title, \"\n \"question_details, posted_by) VALUES (%s,%s,%s) \"\n \";\")\n inputs = input_title, input_details, user_id\n return run_query(query, inputs)\n except psycopg2.Error as e:\n print(e)", "def test_add_question(self):\n model = get_model()\n note = add_question(question='Test Ques', answer='Ans', curr_model=model)\n self.assertEqual(genanki.Note, type(note))", "def add_question(self, prompt, correct_answer):\n\n self.prompt = prompt\n self.correct_answer = correct_answer\n self.new_question = super(AbstractExam, self).__init__(question=self.prompt, answer=self.correct_answer) \n\n # adds the new question to the list of exam questions\n self.exam_questions.append(self.q_and_a)", "def createForm(request):\n if request.method == 'POST':\n form = QuestionFormForm(request.POST)\n if form.is_valid():\n #return the uuid so the organization can use that link in the post to connect to the questionform\n formID = form.save().UUID\n #send them the url for the form\n messages.success(request, 'You have made your question form accessible at: ' + request.build_absolute_uri('/post/') + f'apply/{formID}')\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)\n form = QuestionFormForm()\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)", "def create_assignmentype(request, assignmentype_id=None):\n prof = request.user.prof\n context = {}\n if assignmentype_id:\n assignmentype = Assignmentype.objects.get(id=assignmentype_id)\n message = 'Reset your assignment. 
You can upload a new student list, '\\\n 'but be aware that it will reset the assignment (all former work '\\\n 'will be lost!)'\n type_post = 'reset' # reset the assignmentype\n context['assignmentype_id'] = assignmentype.id\n else:\n assignmentype = None\n message = 'Create a new assignment!'\n type_post = 'create' # new assignmentype\n if request.method == 'POST':\n form = AssignmentypeForm(request.POST, request.FILES,\n instance=assignmentype)\n if form.is_valid():\n if (not assignmentype) and (Assignmentype.objects.filter(\n title=form.cleaned_data['title'])):\n context['error'] = 'Oups, this assignment title has \\\n already been used, change it!'\n else:\n new_assignmentype = form.save(commit=False)\n new_assignmentype.prof = prof\n new_assignmentype.save()\n # create folder where to upload assignments\n try:\n os.mkdir(os.path.join(settings.BASE_DIR,\n settings.MEDIA_ROOT, 'assignment_%s' %\n new_assignmentype.id))\n except FileExistsError:\n pass\n # get list students from csv file\n try:\n existing_students, new_students =\\\n tasks.get_students(new_assignmentype.list_students.path)\n # return page asking for agreement for creation of students\n request.session['existing_students'] = existing_students\n request.session['new_students'] = new_students\n request.session['assignmentype_pk'] = new_assignmentype.pk\n return redirect(\"gradapp:validate_assignmentype_students\")\n except Exception as e:\n logger.error(make_error_message(e))\n new_assignmentype.list_students = None\n new_assignmentype.save()\n # return update page of assignmentype\n return redirect('/reset_assignmentype/%s/' %\n new_assignmentype.pk)\n else:\n form = AssignmentypeForm(instance=assignmentype)\n context['message'] = message\n context['form'] = form\n context['type_post'] = type_post\n return render(request, 'gradapp/assignmentype_form.html', context)", "def test_create_new_question(self):\n\n # get number of questions before post\n questions_before = Question.query.all()\n\n # create new question and load response data\n response = self.client().post('/questions', json=self.new_question)\n data = json.loads(response.data)\n\n # get number of questions after post\n questions_after = Question.query.all()\n\n # see if the question has been created\n question = Question.query.filter_by(id=data['created']).one_or_none()\n\n # check status code and success message\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['success'], True)\n\n # check if one more question after post\n self.assertTrue(len(questions_after) - len(questions_before) == 1)\n\n # check that question is not None\n self.assertIsNotNone(question)", "def create_question(question_text, days, create_choice=True):\n\n time = timezone.now() + datetime.timedelta(days=days)\n question = Question.objects.create(question_text=question_text, pub_date=time)\n if create_choice:\n question.choice_set.create(choice_text=\"Choice 1\", votes=0)\n return question", "def post(self):\n teacher = self.request.get(\"teacher\")\n temail = self.request.get(\"temail\")\n tphone = self.request.get(\"tphone\")\n specialty = self.request.get(\"specialty\")\n\n if teacher and temail and tphone and specialty:\n\n #create a new teacher object and store it in the database\n teacher = Teacher(\n teacher=teacher,\n temail=temail,\n tphone=tphone, \n specialty=specialty)\n teacher.put()\n\n id = teacher.key().id()\n self.redirect(\"/teacher/%s\" % id)\n else:\n error = \"Please include a teacher, an email, a phone number, and a specialty.\"\n 
self.render_form(teacher, temail, tphone, specialty, error)", "def test_create_questions(self):\n res = self.client().post('/questions',\n json={\n \"question\": \"What is chemical \\\n composition of water\",\n \"answer\": \"H2O\",\n \"category\": 1,\n \"difficulty\": 2\n })\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created'])", "def statement_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = StatementForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_statement =\\\n [form.cleaned_data['statement_%s' % i]\n for i in range(1, assignmentype.nb_questions + 1)]\n assignmentype.save()\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_statement = assignmentype.questions_statement\n statement = {}\n if questions_statement:\n for i in range(1, nb_questions + 1):\n statement['statement_%s' % i] =\\\n assignmentype.questions_statement[i - 1]\n else:\n statement = dict.fromkeys(['statement_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = StatementForm(nb_questions=nb_questions,\n initial=statement)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/statement_assignmentype.html',\n context)\n return redirect('gradapp:list_assignmentypes_running')", "def insert_grade(grade, form, rc):\n dbname = form[\"dbname\"]\n collname = \"grades\"\n try:\n coll = rc.client[dbname][collname]\n except (KeyError, AttributeError):\n abort(404)\n try:\n added = rc.client.insert_one(dbname, collname, grade)\n except Exception:\n traceback.print_exc()\n raise", "def createqn(quizID):\n if not current_user.check_educator():\n return render_template('errors/error403.html'), 403\n quiz = validate_quiz_link(current_user, quizID)\n form = QuestionForm()\n delQuizForm = DeleteForm(prefix='quiz')\n delQnForm = DeleteForm(prefix='qn')\n if form.validate_on_submit():\n #Commit inputs to database\n options = (form.op1.data, form.op2.data, form.op3.data, form.op4.data)\n question = add_question(current_user, form.qn.data, options, form.corrOp.data, form.topic.data)\n if form.img.data:\n question.image_file = update_qn_image(form.img.data)\n add_question_quiz(quiz, question)\n flash('Question added')\n if form.complete.data:\n return redirect(url_for('quiz.createquizsuccess', quizID=quizID))\n return redirect(url_for('quiz.createqn', quizID=quizID))\n\n return render_template('quiz/createqn.html', title=' | Create Quiz', form=form, quiz=quiz,delQuizForm=delQuizForm, delQnForm=delQnForm)" ]
[ "0.67061335", "0.61879146", "0.61045116", "0.60622334", "0.59862596", "0.59348214", "0.58540165", "0.57960665", "0.57643914", "0.5759345", "0.5720324", "0.57190794", "0.56958216", "0.5681573", "0.5672369", "0.56627357", "0.5647007", "0.5626833", "0.5582249", "0.5573245", "0.5568568", "0.5557012", "0.5542489", "0.5507724", "0.5499488", "0.5497407", "0.54939514", "0.54755586", "0.5469123", "0.5461508" ]
0.8077942
0
Modify assignmentype fields, except student list.
def modify_assignmentype(request, pk):\n    prof = request.user.prof\n    assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n    if assignmentype:\n        if request.method == 'POST':\n            form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n            if form.is_valid():\n                form.save()\n                return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n        else:\n            form = LightAssignmentypeForm(instance=assignmentype)\n        context = {}\n        context['assignmentype_id'] = assignmentype.id\n        context['message'] = 'Modify details of your assignment '\\\n            '(keep current student list)'\n        context['form'] = form\n        context['type_post'] = 'modify'\n        return render(request, 'gradapp/assignmentype_form.html', context)\n    else:\n        return redirect('gradapp:index')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _enchance_assignment(self, doc):\n\n results = self.get_archive_items_for_assignment(doc)\n if results.count() > 0:\n doc['item_ids'] = [str(item.get(config.ID_FIELD)) for item in results]\n\n self.set_type(doc, doc)", "def filter_allowed_fields(self):\n allowed_fields = super().filter_allowed_fields\n # Remove assignment_id\n allowed_fields.remove('assignment_id')\n return allowed_fields", "def update(self, instance, validated_data):\n validated_data.pop(\"assignment\", None)\n return super().update(instance, validated_data)", "def _modify(self, fields):\n return fields", "def _update_allowed_fields(self) -> list:\n raise NotImplementedError('Each model has to have its list of update allowed fields')", "def setType(self,newtype):\n\t\tself.type = newtype;", "def statement_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = StatementForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_statement =\\\n [form.cleaned_data['statement_%s' % i]\n for i in range(1, assignmentype.nb_questions + 1)]\n assignmentype.save()\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_statement = assignmentype.questions_statement\n statement = {}\n if questions_statement:\n for i in range(1, nb_questions + 1):\n statement['statement_%s' % i] =\\\n assignmentype.questions_statement[i - 1]\n else:\n statement = dict.fromkeys(['statement_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = StatementForm(nb_questions=nb_questions,\n initial=statement)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/statement_assignmentype.html',\n context)\n return redirect('gradapp:list_assignmentypes_running')", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def test_change_domain_type_assignment_rule(self):\n pass", "def set_assignment(self, updates, original=None):\n if not original:\n original = {}\n\n self.set_type(updates, original)\n\n if not updates.get('assigned_to'):\n if updates.get('priority'):\n # Priority was edited - nothing to set here\n return\n else:\n updates['assigned_to'] = {}\n\n assigned_to = updates.get('assigned_to') or {}\n if (assigned_to.get('user') or assigned_to.get('contact')) and not assigned_to.get('desk'):\n raise SuperdeskApiError.badRequestError(message=\"Assignment should have a desk.\")\n\n # set the assignment information\n user = get_user()\n if original.get('assigned_to', {}).get('desk') != assigned_to.get('desk'):\n if original.get('assigned_to', {}).get('state') in \\\n [ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS, ASSIGNMENT_WORKFLOW_STATE.SUBMITTED]:\n raise SuperdeskApiError.forbiddenError(\n message=\"Assignment linked to content. 
Desk reassignment not allowed.\")\n\n assigned_to['assigned_date_desk'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_desk'] = user.get(config.ID_FIELD)\n\n if assigned_to.get('user') and original.get('assigned_to', {}).get('user') != assigned_to.get('user'):\n assigned_to['assigned_date_user'] = utcnow()\n\n if user and user.get(config.ID_FIELD):\n assigned_to['assignor_user'] = user.get(config.ID_FIELD)\n\n if not original.get(config.ID_FIELD):\n updates['original_creator'] = str(user.get(config.ID_FIELD)) if user else None\n updates['assigned_to'][\n ITEM_STATE] = get_next_assignment_status(updates, updates['assigned_to'].get(ITEM_STATE) or\n ASSIGNMENT_WORKFLOW_STATE.ASSIGNED)\n else:\n # In case user was removed\n if not assigned_to.get('user'):\n assigned_to['user'] = None\n else:\n # Moving from submitted to assigned after user assigned after desk submission\n if original.get('assigned_to')['state'] == ASSIGNMENT_WORKFLOW_STATE.SUBMITTED:\n updates['assigned_to']['state'] = get_next_assignment_status(updates,\n ASSIGNMENT_WORKFLOW_STATE.IN_PROGRESS)\n\n updates['version_creator'] = str(user.get(config.ID_FIELD)) if user else None", "def assign_mark(entry: StudentEntry):\n pass", "def clean_fields(self, *args, **kwargs):\n if self.saan:\n self.saan = self.saan.upper() # only in CWR, uppercase anyway\n super().clean_fields(*args, **kwargs)", "def create_assignmentype(request, assignmentype_id=None):\n prof = request.user.prof\n context = {}\n if assignmentype_id:\n assignmentype = Assignmentype.objects.get(id=assignmentype_id)\n message = 'Reset your assignment. You can upload a new student list, '\\\n 'but be aware that it will reset the assignment (all former work '\\\n 'will be lost!)'\n type_post = 'reset' # reset the assignmentype\n context['assignmentype_id'] = assignmentype.id\n else:\n assignmentype = None\n message = 'Create a new assignment!'\n type_post = 'create' # new assignmentype\n if request.method == 'POST':\n form = AssignmentypeForm(request.POST, request.FILES,\n instance=assignmentype)\n if form.is_valid():\n if (not assignmentype) and (Assignmentype.objects.filter(\n title=form.cleaned_data['title'])):\n context['error'] = 'Oups, this assignment title has \\\n already been used, change it!'\n else:\n new_assignmentype = form.save(commit=False)\n new_assignmentype.prof = prof\n new_assignmentype.save()\n # create folder where to upload assignments\n try:\n os.mkdir(os.path.join(settings.BASE_DIR,\n settings.MEDIA_ROOT, 'assignment_%s' %\n new_assignmentype.id))\n except FileExistsError:\n pass\n # get list students from csv file\n try:\n existing_students, new_students =\\\n tasks.get_students(new_assignmentype.list_students.path)\n # return page asking for agreement for creation of students\n request.session['existing_students'] = existing_students\n request.session['new_students'] = new_students\n request.session['assignmentype_pk'] = new_assignmentype.pk\n return redirect(\"gradapp:validate_assignmentype_students\")\n except Exception as e:\n logger.error(make_error_message(e))\n new_assignmentype.list_students = None\n new_assignmentype.save()\n # return update page of assignmentype\n return redirect('/reset_assignmentype/%s/' %\n new_assignmentype.pk)\n else:\n form = AssignmentypeForm(instance=assignmentype)\n context['message'] = message\n context['form'] = form\n context['type_post'] = type_post\n return render(request, 'gradapp/assignmentype_form.html', context)", "def get_readonly_fields(self, request, obj):\n # 
FIXME(matzf) conceptually, an AS can change the ISD. Not allowed for now\n # as I anticipate this may unnecessarily complicate the TRC/certificate\n # update logic. Should be revisited.\n # TODO(matzf): Changing is_core should also be possible, not yet implemented\n # Requires removing core links etc, bump signed certificates\n if obj:\n return ('isd', 'is_core', 'as_id',)\n return ()", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n pass", "def edit_parametertype(request, parametertype, **_kwargs):\n pass", "def fix_fields(self):\n males = self.df[\"Sex\"] == \"M\"\n self.df[\"Sex\"] = np.array(males, dtype=int)\n\n logger.debug(\"Fixing bounded values...\")\n self.fix_bounded_values()\n logger.debug(\"Fixing range values...\")\n self.fix_range_fields()\n logger.debug(\"Fixing keyworded fields...\")\n self.fix_keyword_fields()\n logger.debug(\"Fixing temperature fields...\")\n self.fix_temperature_fields()\n logger.debug(\"Fixing nationality fields...\")\n self.fix_nationality_field()\n logger.debug(\"Fixing percentage fields...\")\n self.fix_percentage_fields()\n logger.debug(\"Combining fields...\")\n self.combine_fields()", "def set_modulation_type(self, mod_type):\n if mod_type not in [0, 1, 2]:\n raise ValueError(\n \"ERROR: Invalid input for modulation type. Allowed values are 0, 1 or 2\")\n\n self._mod_type = mod_type", "def test_change_asset_type_assignment_rule(self):\n pass", "def retype(self, dictionary):\r\n\r\n for name, retype in dictionary.items():\r\n field = self._field_dict[name]\r\n for key, value in retype.items():\r\n if key in _valid_retype_attributes:\r\n field.__setattr__(key, value)\r\n else:\r\n raise Exception(\"Should not use retype to change field attribute '%s'\", key)", "def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])", "def set_data(self,pdata):\n self.uid.data=pdata[0]\n self.pid.data=pdata[1]\n self.pName.data=pdata[2]\n self.pAge.data=pdata[3]\n self.dateOfSubmission.data=pdata[4]\n self.bedType.data=pdata[5]\n self.address.data=pdata[6]\n self.city.data=pdata[7]\n self.state.data=pdata[8]\n self.status.data=pdata[9]", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def _onchange_field(self):\n if not self.secretary_contact_id:\n return\n if self.partner_type in ['dr', 'patient', 'secretary']:\n self.update({\n 'secretary_contact_id': False\n })", "def _getBlockTypesToModify(self):\n raise NotImplementedError", "def KLP_ReAssign_Permissions(request, permissionType):\n\n # check logged in user permissions\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get selected users list\n\n userList = request.POST.getlist('userId')\n permissions = ['Acess']\n opStatus = 'success'\n try:\n if permissionType == 'permissions':\n\n # if permissionsType is permissions assign instituions to user\n\n inst_list = request.POST.getlist('unassignedInst') # get selected institution list\n (a, b, c) = assignPermission( # call assignPermission method to assign permission\n inst_list,\n userList,\n permissions,\n permissionType,\n None,\n True,request.user.username,request.path_info\n )\n else:\n\n # else assign assessments to user\n\n asmList = request.POST.getlist('unassignedAsm') # get selected assesment and institution list\n for asm in asmList:\n asm_list = asm.split('_')\n inst_list = [asm_list[0]]\n assessmentId = asm_list[1]\n (a, b, c) = assignPermission(inst_list, userList,\n permissions, permissionType, 
assessmentId,None,request.user.username,request.path_info) # call assignPermission method to assign permission\n except:\n opStatus = 'fail'\n\n # if reassign permission fail return response as fail else return success.....\n\n return HttpResponse(opStatus)", "def modify_an_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n opt = self.input_options(['midterm', 'finalterm'], 1, 'Which test do you want to modify?')\n score = self.input_score()\n\n if opt.upper() == 'MIDTERM':\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'midterm'] = score\n else:\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'finalterm'] = score", "def _update_input_type(self):\n pass", "def validate_assignmentype_students(request):\n existing_students = request.session.get('existing_students', False)\n new_students = request.session.get('new_students', False)\n assignmentype_pk = request.session.get('assignmentype_pk', False)\n if assignmentype_pk:\n assignmentype = Assignmentype.objects.get(id=assignmentype_pk)\n return render(request, 'gradapp/validate_assignmentype_students.html',\n {'existing_students': existing_students,\n 'new_students': new_students,\n 'assignmentype': assignmentype})\n else:\n return redirect('gradapp:index')", "def save(self, *args, **kwargs):\n self.entity_type = \"Person\"\n super().save(*args, **kwargs)" ]
[ "0.5704868", "0.54479146", "0.53199524", "0.5250077", "0.5214731", "0.5099856", "0.50951463", "0.5083383", "0.50801265", "0.503171", "0.5029087", "0.5015462", "0.500106", "0.4976095", "0.49526381", "0.49027774", "0.4893022", "0.4884994", "0.48844108", "0.48718646", "0.4853452", "0.4800415", "0.47855747", "0.4780223", "0.4763925", "0.47515717", "0.47477162", "0.47235602", "0.47059226", "0.46918932" ]
0.62879336
0
Delete assignmentype with id=pk and redirect to list of running assignmentype if type_list=='1', and to list of archived assignmentype if type_list=='0'
def delete_assignmentype(request, pk, type_list):\n    prof = request.user.prof\n    assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n    if assignmentype:\n        assignmentype.delete()\n        if type_list == '1':\n            return redirect('gradapp:list_assignmentypes_running')\n        elif type_list == '0':\n            return redirect('gradapp:list_assignmentypes_archived')\n    else:\n        return redirect('gradapp:index')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def archive_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n assignmentype.archived = True\n assignmentype.save()\n return redirect('gradapp:list_assignmentypes_archived')\n else:\n return redirect('gradapp:index')", "def remove_data(request, data_type, data_pk, subject_pk, item_pk):\n subject = get_object_or_404(Subject, pk=subject_pk)\n item = get_object_or_404(Item, pk=item_pk)\n\n if data_type == 'modal_section':\n modalSection = get_object_or_404(ModalSection, pk=data_pk)\n modalSection.delete()\n elif data_type == 'modal_data':\n modalData = get_object_or_404(ModalData, pk=data_pk)\n modalData.delete()\n\n return redirect('modal', subject.id, item.id)", "def delete_parametertype(request, parametertype, **_kwargs):\n pass", "def unlink(self, cr, uid, ids, context=None):\n allowances_archive = self.read(cr, uid, ids, ['transfer','state'], context=context)\n unlink_ids = []\n for record in allowances_archive:\n if record['transfer'] == False and record['state'] in ['draft','cancel']:\n unlink_ids.append(record['id'])\n else:\n raise osv.except_osv(_('Invalid action !'), _('Sorry you can not Delete this record(s), Because The request is in Process , You have To cancelled Firest or It already Transtered To account Voucher!'))\n for id in unlink_ids:\n allowances_archive_name = self.browse(cr, uid, id, context=context).name\n message = _(\"Env and Safety allowances archive '%s' has been deleted.\") % allowances_archive_name\n self.log(cr, uid, id, message)\n return super(env_and_safety_allowances_archive, self).unlink(cr, uid, unlink_ids, context=context)", "def _delete_task_assignment(request, task_id, task_assignment_id):\n try:\n task = Task.objects.get(id=task_id)\n except ObjectDoesNotExist:\n messages.error(request, 'Cannot find Task with ID {}'.format(task_id))\n return redirect(index)\n try:\n task_assignment = TaskAssignment.objects.get(id=task_assignment_id)\n except ObjectDoesNotExist:\n messages.error(request,\n 'Cannot find Task Assignment with ID {}'.format(task_assignment_id))\n return redirect(index)\n\n if task_assignment.completed:\n messages.error(request, u\"The Task can't be returned because it has been completed\")\n return redirect(index)\n if request.user.is_authenticated:\n if task_assignment.assigned_to != request.user:\n messages.error(request, 'The Task you are trying to return belongs to another user')\n return redirect(index)\n else:\n if task_assignment.assigned_to is not None:\n messages.error(request, 'The Task you are trying to return belongs to another user')\n return redirect(index)\n if task.batch.project.login_required:\n messages.error(request, 'You do not have permission to access this Task')\n return redirect(index)\n\n task_assignment.delete()", "def list_assignmentypes_archived(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'archived', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=True, prof=prof)\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def modify_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n if request.method == 'POST':\n form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n if form.is_valid():\n form.save()\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n else:\n form = 
LightAssignmentypeForm(instance=assignmentype)\n context = {}\n context['assignmentype_id'] = assignmentype.id\n context['message'] = 'Modify details of your assignment '\\\n '(keep current student list)'\n context['form'] = form\n context['type_post'] = 'modify'\n return render(request, 'gradapp/assignmentype_form.html', context)\n else:\n return redirect('gradapp:index')", "def delete(request, content_type, object_id):\n user = request.user\n content_type_object = ContentType.objects.get(id = content_type)\n node = content_type_object.model_class().objects.get(id = object_id)\n community_wiki.delete_content(node)\n \n redirect_url = reverse('content-list-redirect', args=[content_type_object.id])\n return http.HttpResponseRedirect(redirect_url)", "def management_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n management_reference = get_object_or_404(Management, id=id,company=company)\n\n #deletes the view and redirects to the page.\n management_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def KLP_Revoke_Permissions(request, permissionType):\n\n # check logged in user permissions\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get user id to revoke permissions\n\n user_id = request.POST.get('userId')\n opStatus = 'success'\n try:\n if permissionType == 'permissions':\n\n # if permissiontype is permissions revoke institution permissions for the user\n\n userObj = User.objects.get(pk=user_id)\n\n # get institution list to revoke\n\n instList = request.POST.getlist('assignedInst')\n for inst_id in instList:\n instObj = Institution.objects.get(pk=inst_id)\n\n # revoke permission for user\n\n userObj.revoke('Acess', instObj)\n else:\n\n # else revoke assessment permissions\n\n assignedAsmList = request.POST.getlist('assignedAsm')\n for userAsm_id in assignedAsmList:\n\n # get UserAssessmentPermissions object\n\n permObj = \\\n UserAssessmentPermissions.objects.get(pk=userAsm_id)\n permObj.access = False # revoke permissions\n permObj.save()\n except:\n opStatus = 'fail'\n\n # if revoke permission fail return response as fail else return success.\n\n return HttpResponse(opStatus)", "def usertype_delete(request, simulation, demandsegment):\n # With CASCADE attribute, everything should be delete (demand segment, user\n # type, distributions, matrix and matrix points).\n demandsegment.delete()\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:demand_view', args=(simulation.id,))\n )", "def deleteCompiles():\n if webapp.config['COMPILE_OFF']:\n return returnError(\"Compilation Features are not available\", 400)\n\n\n deleteList = request.form.getlist(\"delete_compile\")\n for uid in deleteList:\n logger.info(\"[FLASKWEB /delete/compiles] DELETING compile job uid=\" + uid)\n job = db.getCompiles(uid=uid)[0]\n db.deleteCompile(job['uid'])\n return redirect(url_for('listJobs')), 302", "def delete(self, *args, **kwargs):\n try:\n # pylint: disable=no-member\n public_course_run = self.public_course_run\n except CourseRun.DoesNotExist:\n pass\n else:\n if public_course_run.state[\"priority\"] < CourseState.TO_BE_SCHEDULED:\n self.direct_course.extended_object.title_set.update(\n publisher_state=PUBLISHER_STATE_DIRTY\n ) # mark page dirty in all languages\n return 
super().delete(*args, **kwargs)", "def KLP_ReAssign_Permissions(request, permissionType):\n\n # check logged in user permissions\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get selected users list\n\n userList = request.POST.getlist('userId')\n permissions = ['Acess']\n opStatus = 'success'\n try:\n if permissionType == 'permissions':\n\n # if permissionsType is permissions assign instituions to user\n\n inst_list = request.POST.getlist('unassignedInst') # get selected institution list\n (a, b, c) = assignPermission( # call assignPermission method to assign permission\n inst_list,\n userList,\n permissions,\n permissionType,\n None,\n True,request.user.username,request.path_info\n )\n else:\n\n # else assign assessments to user\n\n asmList = request.POST.getlist('unassignedAsm') # get selected assesment and institution list\n for asm in asmList:\n asm_list = asm.split('_')\n inst_list = [asm_list[0]]\n assessmentId = asm_list[1]\n (a, b, c) = assignPermission(inst_list, userList,\n permissions, permissionType, assessmentId,None,request.user.username,request.path_info) # call assignPermission method to assign permission\n except:\n opStatus = 'fail'\n\n # if reassign permission fail return response as fail else return success.....\n\n return HttpResponse(opStatus)", "def delete_all(submission_client, program, project, batch_size=200, types=['submitted_methylation', 'aliquot', 'sample', 'demographic', 'case', 'experiment']):\n for t in types:\n print('{}-{}.{}'.format(program, project, t))\n try:\n delete_type(submission_client, program, project, batch_size, t)\n except Exception as e:\n print(e)", "def on_delete(self, doc):\n if doc.get('_to_delete') is True:\n # Already marked for delete - no validation needed (could be the background job)\n return\n\n # Also make sure the Planning item is locked by this user and session\n planning_service = get_resource_service('planning')\n planning_item = planning_service.find_one(req=None, _id=doc.get('planning_item'))\n planning_item_state = (planning_item or {}).get('state')\n\n if planning_item_state != WORKFLOW_STATE.SPIKED:\n if not self.is_associated_planning_or_event_locked(planning_item):\n raise SuperdeskApiError.forbiddenError(\n message='Lock is not obtained on the associated Planning item or Event'\n )\n\n # Make sure the Assignment is locked by this user and session\n assignment_locked = is_locked_in_this_session(doc)\n if planning_item_state in [WORKFLOW_STATE.KILLED, WORKFLOW_STATE.SPIKED] and\\\n (not doc.get('lock_user') or assignment_locked):\n assignment_locked = True\n\n if not assignment_locked:\n raise SuperdeskApiError.forbiddenError(\n message='Lock is not obtained on the Assignment item'\n )\n\n # Make sure the content linked to assignment (if) is also not locked\n # This is needed when the planing item is being unposted/spiked\n archive_items = self.get_archive_items_for_assignment(doc)\n for archive_item in archive_items:\n if archive_item.get('lock_user') and not is_locked_in_this_session(archive_item):\n raise SuperdeskApiError.forbiddenError(message='Associated archive item is locked')\n\n # Make sure we cannot delete a completed Assignment\n # This should not be needed, as you cannot obtain a lock on an Assignment that is completed\n # But keeping it here for completeness\n if doc['assigned_to'].get('state') == ASSIGNMENT_WORKFLOW_STATE.COMPLETED:\n raise SuperdeskApiError.badRequestError(\n message='Cannot delete a completed Assignment'\n )", "def delete_patchset(request):\n request.patchset.nuke()\n return 
HttpResponseRedirect(reverse(show, args=[request.issue.key.id()]))", "def delete_selected(modeladmin, request, queryset):\n related_transactions = helpers.pre_delete_processes(modeladmin, request, queryset)\n response = actions.delete_selected(modeladmin, request, queryset)\n if response is None:\n helpers.post_delete_processes(modeladmin, request, related_transactions)\n return response", "def plan_operativo_eliminar(request, **kwargs):\n if request.method == 'DELETE' or request.method == 'GET':\n try:\n plan = PlanOperativo.objects.get(**kwargs)\n plan_estrategico_id = plan.plan_estrategico.id\n if plan.delete():\n if not request.is_ajax():\n messages.success(request, 'Plan Operativo eliminado')\n return redirect('planificacion:plan_estrategico_detalle', plan_estrategico_id)\n except ObjectDoesNotExist:\n if not request.is_ajax():\n messages.error(request, 'No se encontró el plan a eliminar')\n except Exception:\n if not request.is_ajax():\n messages.error(request, 'Error al eliminar el plan operativo')\n elif not request.is_ajax():\n messages.error(request, 'Petición inválida!!')\n if kwargs.get('id'):\n return redirect('planificacion:plan_operativo_detalle', kwargs.get('id'))\n return redirect('planificacion:plan_operativo_lista')", "def delPermission(self,request):\n request.needAuthType(request.ADMIN)\n request.checkArgs(\"admin_username\",\"perm_name\")\n request.getAuthNameObj().canDo(\"CHANGE ADMIN PERMISSIONS\")\n perm_actions.getActionManager().deletePermission(request[\"admin_username\"],request[\"perm_name\"])", "def delete_rules(self, cr, uid, context=None):\r\n action_ids = self.base_action_rule.search(cr, uid, [('model', '=', self.model._name)], context=context)\r\n return self.base_action_rule.unlink(cr, uid, action_ids, context=context)", "def cleanup_docrules_permissions():\n content_type, created = ContentType.objects.get_or_create(app_label='rule', model='', name='document type')\n permissions = Permission.objects.filter(content_type=content_type)\n for p in permissions:\n p.delete()\n #print 'Deleted all permissions for each DocumentTypeRule()'", "def delete(self, request, *args, **kwargs):\n\t\ttask_object = self.get_object()\n\t\tsuccess_url = self.get_success_url()\n\t\ttask_object.is_deleted =1\n\t\ttask_object.save()\n\t\treturn HttpResponseRedirect(success_url)", "def delete(self, something):\n if something == Concept:\n number = 0\n target_list = self.concept_list\n elif something == Subcategory:\n number = 1\n target_list = self.concept_list\n elif something == Relation:\n number = 2\n target_list = self.relation_list\n if target_list.currentIndex().isValid():\n something = target_list.selectedItems()[0].data(Qt.UserRole)[number]\n self.db.delete(something)\n self.search()", "def list_assignmentypes_running(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'running', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=False, prof=prof).order_by('deadline_submission')\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def delete_model(self, request, instance):\n pass", "def actividad_eliminar(request, **kwargs):\n if request.method == 'DELETE' or request.method == 'GET':\n try:\n actividad = Actividad.objects.get(**kwargs)\n meta_anual = actividad.meta_anual.id\n if actividad.delete():\n meta_anual.distribuir_porcentaje()\n if not request.is_ajax():\n messages.success(request, 'Elemento eliminado')\n return redirect('planificacion:meta_anual_detalle', meta_anual.id)\n except 
ObjectDoesNotExist:\n if not request.is_ajax():\n messages.error(request, 'No se encontró la actividad a eliminar')\n except Exception:\n if not request.is_ajax():\n messages.error(request, 'Error al eliminar la actividad')\n elif not request.is_ajax():\n messages.error(request, 'Petición inválida!!')\n if kwargs.get('id'):\n return redirect('planificacion:activiadad_detalle', kwargs.get('id'))\n return redirect('planificacion:plan_operativo_lista')", "def post_delete_access_attempt(self, instance, **kwargs):", "def office_delete(request, slug,id):\n \n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n\n if request.method == 'POST':\n return HttpResponseRedirect('/company/'+str(slug))\n else: \n #verifies if the company exists if not returns a 404 page\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n #deletes the view and redirects to the page.\n office_reference.delete()\n return HttpResponseRedirect('/company/'+str(slug))", "def delete(request, shoppinglist_id):\n Shoppinglist.objects.filter(pk=shoppinglist_id,\n pantry__owner=request.user).delete()\n return redirect('blackem.users.views.home')" ]
[ "0.6607052", "0.58759385", "0.5581794", "0.5503855", "0.5462696", "0.5460969", "0.5406829", "0.5383195", "0.5348376", "0.53082585", "0.5293434", "0.5274261", "0.52701956", "0.5247633", "0.5216509", "0.5201757", "0.5200437", "0.51928216", "0.5189939", "0.5187883", "0.51853114", "0.5184345", "0.5184317", "0.517376", "0.5162106", "0.51598394", "0.5153235", "0.5153016", "0.5150069", "0.51474583" ]
0.86916167
0
When creating an assignment, shows the students that will be associated with it (existing students and new students). If validated, new students are created and both new and existing students are associated with the assignment.
def validate_assignmentype_students(request): existing_students = request.session.get('existing_students', False) new_students = request.session.get('new_students', False) assignmentype_pk = request.session.get('assignmentype_pk', False) if assignmentype_pk: assignmentype = Assignmentype.objects.get(id=assignmentype_pk) return render(request, 'gradapp/validate_assignmentype_students.html', {'existing_students': existing_students, 'new_students': new_students, 'assignmentype': assignmentype}) else: return redirect('gradapp:index')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_assignmentype_students(request):\n existing_students = request.session.get('existing_students', False)\n new_students = request.session.get('new_students', False)\n assignmentype_pk = request.session.get('assignmentype_pk', False)\n if assignmentype_pk:\n tasks.create_assignment(assignmentype_pk,\n existing_students, new_students)\n return redirect('/detail_assignmentype/%s/' % assignmentype_pk)\n else:\n # TODO return error message\n return redirect('gradapp:index')", "def add_an_assignment(cls):\n os.system('clear')\n while True:\n data = Ui.get_inputs(['Start date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'End date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'Assignment name\\n\\t'], \"Please provide the assignment details: \\n\")\n try:\n start_date_day = int(data[0])\n start_date_month = int(data[1])\n start_date_year = int(data[2])\n end_date_day = int(data[3])\n end_date_month = int(data[4])\n end_date_year = int(data[5])\n name_of_assign = str(data[6])\n except ValueError:\n Ui.print_message(\"\\nDate must be an integer!\\n\\n\")\n break\n\n if start_date_day > 31 or start_date_day < 1:\n Ui.print_message('\\nStart day value is incorrect')\n else:\n if start_date_month > 12 or start_date_month < 1:\n Ui.print_message('\\nStart month value is incorrect')\n else:\n if start_date_year > 9999 or start_date_year < 2000:\n Ui.print_message('\\nStart year value is incorrect')\n else:\n if end_date_day > 31 or end_date_day < 1:\n Ui.print_message('\\nEnd day value is incorrect')\n else:\n if end_date_month > 12 or end_date_month < 1:\n Ui.print_message('\\nEnd month value is incorrect')\n else:\n if end_date_year > 9999 or end_date_year < 1000:\n Ui.print_message('\\nEnd year value is incorrect')\n else:\n if len(name_of_assign) <= 1:\n Ui.print_message(\"\\nAssignment name have to be longer!\")\n else:\n list_of_names_of_assignments = []\n for i in Assignments.assignments_list:\n list_of_names_of_assignments.append(i.assignment_name)\n if name_of_assign in list_of_names_of_assignments:\n Ui.print_message(\"\\nAssignment name already exist, \"\n \"type another one!\")\n else:\n start_date = '{}-{}-{}'.format(start_date_year,\n start_date_month,\n start_date_day)\n end_date = '{}-{}-{}'.format(end_date_year,\n end_date_month,\n end_date_day)\n new_assignment = cls(start_date, end_date, name_of_assign)\n Assignments.assignments_list.append(new_assignment)\n Ui.print_message(\"\\nAssignment added!\\n\")\n Ui.get_inputs([''], \"Click enter to go back\")\n break # it stops the WHILE loop whenever passed information is incorrect, or assignment has been added", "def added_student():\n\n first = request.form.get('first')\n last = request.form.get('last')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n\n return render_template(\"student_confirmed.html\",\n first=first,\n last=last,\n github=github)", "def added_student():\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n first, last, github = hackbright.get_student_by_github(github)\n\n html = render_template(\"student_added.html\", first=first, last=last, github=github)\n\n return html", "def add_student():\n # import pdb; pdb.set_trace()\n if request.method == \"POST\":\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, 
last, github)\n\n html = render_template(\"added_student_confirmation.html\",\n first=first,\n last=last,\n github=github)\n\n return html", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def add_student():\n\n return render_template(\"student_add.html\")", "def create_assignmentype(request, assignmentype_id=None):\n prof = request.user.prof\n context = {}\n if assignmentype_id:\n assignmentype = Assignmentype.objects.get(id=assignmentype_id)\n message = 'Reset your assignment. You can upload a new student list, '\\\n 'but be aware that it will reset the assignment (all former work '\\\n 'will be lost!)'\n type_post = 'reset' # reset the assignmentype\n context['assignmentype_id'] = assignmentype.id\n else:\n assignmentype = None\n message = 'Create a new assignment!'\n type_post = 'create' # new assignmentype\n if request.method == 'POST':\n form = AssignmentypeForm(request.POST, request.FILES,\n instance=assignmentype)\n if form.is_valid():\n if (not assignmentype) and (Assignmentype.objects.filter(\n title=form.cleaned_data['title'])):\n context['error'] = 'Oups, this assignment title has \\\n already been used, change it!'\n else:\n new_assignmentype = form.save(commit=False)\n new_assignmentype.prof = prof\n new_assignmentype.save()\n # create folder where to upload assignments\n try:\n os.mkdir(os.path.join(settings.BASE_DIR,\n settings.MEDIA_ROOT, 'assignment_%s' %\n new_assignmentype.id))\n except FileExistsError:\n pass\n # get list students from csv file\n try:\n existing_students, new_students =\\\n tasks.get_students(new_assignmentype.list_students.path)\n # return page asking for agreement for creation of students\n request.session['existing_students'] = existing_students\n request.session['new_students'] = new_students\n request.session['assignmentype_pk'] = new_assignmentype.pk\n return redirect(\"gradapp:validate_assignmentype_students\")\n except Exception as e:\n logger.error(make_error_message(e))\n new_assignmentype.list_students = None\n new_assignmentype.save()\n # return update page of assignmentype\n return redirect('/reset_assignmentype/%s/' %\n new_assignmentype.pk)\n else:\n form = AssignmentypeForm(instance=assignmentype)\n context['message'] = message\n context['form'] = form\n context['type_post'] = type_post\n return render(request, 'gradapp/assignmentype_form.html', context)", "def display_form():\n\n return render_template('add_new_student.html')", "def student(identificator):\n student_table = db.get_table('student')\n student = student_table.get(identificator)\n if student is None:\n abort(404)\n discipline = db.get_table('discipline')\n disciplines = discipline.get()\n scores = student_table.get_scores(identificator)\n for each in disciplines:\n if each['id'] not in scores:\n scores[each['id']] = {'score': '', 'id': 0}\n form = StudentForm()\n return render_template(\n 'student.html', student=student,\n form=form, disciplines=disciplines,\n scores=scores\n )", "def create(self, validated_data):\n return Assignment.objects.create(**validated_data)", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n 
for _ in range(50):\r\n create_student(faculty)", "def AddStudent(self, event):\n pass", "def addStudent(request):\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_type\": context_helper.gender_helper(),\n\t}\n\tif request.method == 'POST':\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\tduplicate_student = models.Student.objects.filter(\n\t\t\tname=sname, dob=dob, guardian_name=gname,\n\t\t\tguardian_type=gtype, phone=phone, email=email\n\t\t).first()\n\t\tif duplicate_student:\n\t\t\tcontext_dict[\"message\"] = 'Student already exist.'\n\t\t\tduplicate_student.soft_delete=False\n\t\t\tduplicate_student.save()\n\t\t\treturn render(request, \"AddStudent.html\", context_dict)\n\t\taddress_flag = request.POST.get('address_flag')\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tstudent = models.Student(\n\t\t\t\tname=sname,\n\t\t\t\troll_no=roll,\n\t\t\t\tdob=dob,\n\t\t\t\tgender=gender,\n\t\t\t\tblood_group=bgroup,\n\t\t\t\tphone=phone,\n\t\t\t\tcurr_address=curradd,\n\t\t\t\tperm_address=permadd,\n\t\t\t\tguardian_name=gname,\n\t\t\t\tguardian_type=gtype,\n\t\t\t\tguardian_phone=gphone,\n\t\t\t\tcourse=models.Course.objects.get(pk=course),\n\t\t\t\tbatch=batch,\n\t\t\t\temail=email,\n\t\t\t\taddress_flag=address_flag\n\t\t\t)\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\tstudent.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added roll number' + str(roll) +'.\\n',\n\t\t\t\tactivity_type=\"add student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added new student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(\n\t\trequest, \"addStudent.html\", context_dict\n\t)", "def show_add_student_form():\n\n return render_template(\"add_student_form.html\")", "def _create_students(self):\n def mktime(str_date):\n return time.mktime(time.strptime(\n str_date, CountSkillCompletion.DATE_FORMAT))\n self.day1 = '2015-01-01'\n self.day2 = '2015-01-02'\n self.day3 = '2015-01-03'\n self.day4 = '2015-01-04'\n c = SkillCompletionTracker.COMPLETED\n p = SkillCompletionTracker.IN_PROGRESS\n # progress string for students\n students_progress = [\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day1)},\n self.skill2.id : {c: mktime(self.day4), p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day2)},\n self.skill2.id : {p: mktime(self.day1)}},\n {self.skill1.id : {c: 
mktime(self.day1)}},\n {} # No progress\n ]\n for index, progress in enumerate(students_progress):\n student = models.Student(user_id=str(index))\n student.put()\n comp = models.StudentPropertyEntity.create(\n student=student,\n property_name=SkillCompletionTracker.PROPERTY_KEY)\n comp.value = transforms.dumps(progress)\n comp.put()", "def test14_add_new_student_with_teacher(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). \\\n enter_student_data(data['third_new_student']).\\\n enter_name_approved_by_custom(data['third_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['third_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)\n return self.students_page", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def __ui_add_student(self):\n student_id = input(\"student_id: \")\n student_name = input(\"student_name: \")\n\n print(\"Give disciplines for student, enter for done\")\n disciplines_list = []\n\n discipline_name = '0'\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.add_student(student_id, student_name, disciplines_list)\n print(\"Add student successful\\n\")\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return", "def student_view(self, context=None):\n # pylint: disable=no-member\n log.info(\"Studnent view called\")\n log.info(self)\n context = {\n \"student_state\": json.dumps(self.student_state()),\n \"id\": self.location.name.replace('.', '_'),\n \"max_file_size\": getattr(\n settings, \"STUDENT_FILEUPLOAD_MAX_SIZE\",\n self.STUDENT_FILEUPLOAD_MAX_SIZE\n )\n }\n fragment = Fragment()\n fragment.add_content(\n render_template(\n 'templates/assignment/show.html',\n context\n )\n )\n fragment.add_javascript(_resource(\"static/js/src/agea.js\"))\n fragment.initialize_js('ExcelSheetAssessmentXBlock')\n return fragment", "def get_add_student_form():\n\n return render_template(\"student_add.html\")", "def create_scaffold_assignments_table(self):\n log.info(\"Creating table to store Scaffold genus assignments ...\")\n self.create_table(self.ScaffoldsAssignmentsTable ,self.ScaffoldAssignmentsFields,\n self.ScaffoldAssignmentsTypes)", "def generate_report_sheet(self, subjects):\n\t\tif self.is_student:\n\t\t\treport_sheet = []\n\t\t\t# For each subject, find all student assessments\n\t\t\tfor subject in subjects:\n\t\t\t\tsubject_data = {\n\t\t\t\t\t'subject': subject.name\n\t\t\t\t}\n\t\t\t\tsubject_grades = {}\n\t\t\t\tassessment_types = 
AssessmentType.objects.filter(student_assessments__subject=subject).annotate(\n\t\t\t\t\tnumber=models.Count('student_assessments'), max_score=models.Sum('student_assessments__max_score'))\n\t\t\t\tfor assessment_type in assessment_types:\n\t\t\t\t\t# Probably will optimize this later, but ...\n\t\t\t\t\ttype_weight = StudentAssessmentTypeWeight.objects.filter(subject=subject, assessment_type=assessment_type)[0]\n\t\t\t\t\tsubject_grades[assessment_type.name] = {\n\t\t\t\t\t\t'max_score': assessment_type.max_score,\n\t\t\t\t\t\t'actual_score': 0,\n\t\t\t\t\t\t'max_percentage': type_weight.weight,\n\t\t\t\t\t\t'actual_percentage': 0,\n\t\t\t\t\t}\n\t\t\t\t\tassessments = subject.student_assessments.filter(assessment_type=assessment_type)\n\t\t\t\t\tfor assessment in assessments:\n\t\t\t\t\t\t# Assuming only one grade for now\n\t\t\t\t\t\tstudent_grade = assessment.grades.filter(student=self)[0]\n\t\t\t\t\t\tsubject_grades[assessment_type.name]['actual_score'] += student_grade.score\n\t\t\t\t\tactual_score = subject_grades[assessment_type.name]['actual_score']\n\t\t\t\t\tmax_score = subject_grades[assessment_type.name]['max_score']\n\t\t\t\t\tmax_percentage = type_weight.weight\n\t\t\t\t\tsubject_grades[assessment_type.name]['actual_percentage'] = (float(actual_score)/max_score)*max_percentage\n\t\t\t\tsubject_data['grades'] = subject_grades\n\t\t\t\treport_sheet.append(subject_data)\n\t\t\t# Use final grades to to determine score out of (weight) for each type\n\t\t\t# Determine final grade for the subject\n\t\t\t# Determine final grade (average) overall\n\t\t\tprint('Generated report sheet: {}'.format(report_sheet))\n\t\t\treturn report_sheet\n\t\telse:\n\t\t\tprint('Cannot generate a report sheet for a non-student')", "def example(exam_name, question_set, student):\n\n exam = Exam(exam_name)\n for question in question_set:\n exam.add_question(question, question_set[question])\n student = Student(student['f_name'], student['l_name'], student['address'])\n take_test(exam, student)\n return student, exam", "def add_student():\n if request.method == 'POST':\n db.add_student(request.form)\n return redirect('/registry')\n else:\n return render_template('add.html')", "def add_student():\n first_name = tkinter.StringVar()\n last_name = tkinter.StringVar()\n major = tkinter.StringVar()\n start_date = tkinter.StringVar()\n L1 = tkinter.Label(m, text=\"First Name:\").grid(row=1, column=2)\n E1 = tkinter.Entry(m, textvariable=first_name).grid(row=1, column=3)\n L2 = tkinter.Label(m, text=\"Last Name:\").grid(row=2, column=2)\n E2 = tkinter.Entry(m, textvariable=last_name).grid(row=2, column=3)\n L3 = tkinter.Label(m, text=\"Major:\").grid(row=3, column=2)\n E3 = tkinter.Entry(m, textvariable=major).grid(row=3, column=3)\n L4 = tkinter.Label(m, text=\"Start Date:\").grid(row=4, column=2)\n E4 = tkinter.Entry(m, textvariable=start_date).grid(row=4, column=3)\n global conn\n with conn:\n tkinter.Button(m, text=\"Submit\", width=25,\n command=lambda: create_student(conn, (first_name.get(), last_name.get(), major.get(), start_date.get()),\n first_name, last_name, major,\n start_date)).grid(row=5,\n column=3) # calls create student function", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n 
self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! \\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def student_view(self, context=None):\n if self.is_course_staff():\n return self.staff_view()\n gea_assessment = GeaAssessment(User.objects.get(id=self.xmodule_runtime.user_id), self)\n frag = Fragment(loader.render_template(\"templates/edx_gea/student.html\",\n {'score' : gea_assessment.score,\n 'comment' : gea_assessment.comment}))\n return frag", "def test01_add_new_student_with_admin(self):\n students_list_with_new_student = self.students_page.\\\n click_edit_students_list_button().\\\n click_add_new_student_button().\\\n enter_student_data(data['first_new_student']).\\\n click_save_data_changes_button().\\\n click_exit_students_list_editor_button().\\\n students_table()\n student = data_student_for_check(data['first_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)", "def __str__(self):\n return \"student:\"+str(self.name)+\":\"+str(self.age)+\":\"+str(self.major)" ]
[ "0.65330654", "0.60380155", "0.6018351", "0.59819305", "0.59672946", "0.5896777", "0.5893442", "0.58191305", "0.57622266", "0.57531506", "0.5744609", "0.5717306", "0.57112765", "0.5708278", "0.5666188", "0.56414866", "0.5589872", "0.5569364", "0.5552783", "0.55325806", "0.55222666", "0.55165184", "0.54629093", "0.5457135", "0.54568297", "0.54230374", "0.5417559", "0.53882486", "0.5381957", "0.5377504" ]
0.65064394
1
After validate_assignmentype_students, create the new students and associate both new and existing students with an assignmentype.assignment.
def create_assignmentype_students(request): existing_students = request.session.get('existing_students', False) new_students = request.session.get('new_students', False) assignmentype_pk = request.session.get('assignmentype_pk', False) if assignmentype_pk: tasks.create_assignment(assignmentype_pk, existing_students, new_students) return redirect('/detail_assignmentype/%s/' % assignmentype_pk) else: # TODO return error message return redirect('gradapp:index')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_assignmentype(request, assignmentype_id=None):\n prof = request.user.prof\n context = {}\n if assignmentype_id:\n assignmentype = Assignmentype.objects.get(id=assignmentype_id)\n message = 'Reset your assignment. You can upload a new student list, '\\\n 'but be aware that it will reset the assignment (all former work '\\\n 'will be lost!)'\n type_post = 'reset' # reset the assignmentype\n context['assignmentype_id'] = assignmentype.id\n else:\n assignmentype = None\n message = 'Create a new assignment!'\n type_post = 'create' # new assignmentype\n if request.method == 'POST':\n form = AssignmentypeForm(request.POST, request.FILES,\n instance=assignmentype)\n if form.is_valid():\n if (not assignmentype) and (Assignmentype.objects.filter(\n title=form.cleaned_data['title'])):\n context['error'] = 'Oups, this assignment title has \\\n already been used, change it!'\n else:\n new_assignmentype = form.save(commit=False)\n new_assignmentype.prof = prof\n new_assignmentype.save()\n # create folder where to upload assignments\n try:\n os.mkdir(os.path.join(settings.BASE_DIR,\n settings.MEDIA_ROOT, 'assignment_%s' %\n new_assignmentype.id))\n except FileExistsError:\n pass\n # get list students from csv file\n try:\n existing_students, new_students =\\\n tasks.get_students(new_assignmentype.list_students.path)\n # return page asking for agreement for creation of students\n request.session['existing_students'] = existing_students\n request.session['new_students'] = new_students\n request.session['assignmentype_pk'] = new_assignmentype.pk\n return redirect(\"gradapp:validate_assignmentype_students\")\n except Exception as e:\n logger.error(make_error_message(e))\n new_assignmentype.list_students = None\n new_assignmentype.save()\n # return update page of assignmentype\n return redirect('/reset_assignmentype/%s/' %\n new_assignmentype.pk)\n else:\n form = AssignmentypeForm(instance=assignmentype)\n context['message'] = message\n context['form'] = form\n context['type_post'] = type_post\n return render(request, 'gradapp/assignmentype_form.html', context)", "def validate_assignmentype_students(request):\n existing_students = request.session.get('existing_students', False)\n new_students = request.session.get('new_students', False)\n assignmentype_pk = request.session.get('assignmentype_pk', False)\n if assignmentype_pk:\n assignmentype = Assignmentype.objects.get(id=assignmentype_pk)\n return render(request, 'gradapp/validate_assignmentype_students.html',\n {'existing_students': existing_students,\n 'new_students': new_students,\n 'assignmentype': assignmentype})\n else:\n return redirect('gradapp:index')", "def create(self, validated_data):\n return Assignment.objects.create(**validated_data)", "def _create_students(self):\n def mktime(str_date):\n return time.mktime(time.strptime(\n str_date, CountSkillCompletion.DATE_FORMAT))\n self.day1 = '2015-01-01'\n self.day2 = '2015-01-02'\n self.day3 = '2015-01-03'\n self.day4 = '2015-01-04'\n c = SkillCompletionTracker.COMPLETED\n p = SkillCompletionTracker.IN_PROGRESS\n # progress string for students\n students_progress = [\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day1)},\n self.skill2.id : {c: mktime(self.day4), p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day2)},\n self.skill2.id : {p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day1)}},\n {} # No progress\n ]\n for index, progress in enumerate(students_progress):\n student = models.Student(user_id=str(index))\n 
student.put()\n comp = models.StudentPropertyEntity.create(\n student=student,\n property_name=SkillCompletionTracker.PROPERTY_KEY)\n comp.value = transforms.dumps(progress)\n comp.put()", "def insert_question_assignmentype(request, pk, cd):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n cd = int(cd)\n if cd == 1:\n classForm = AddQuestionForm\n info = 'Add'\n elif cd == -1:\n classForm = RemoveQuestionForm\n info = 'Remove'\n if assignmentype:\n if request.method == 'POST':\n form = classForm(request.POST,\n nb_questions=assignmentype.nb_questions)\n if form.is_valid():\n question = form.cleaned_data['question']\n # Modify attribute question of all associated evalquestion\n if cd == -1:\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question=question)\n evalquestions.delete()\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question__gte=question)\n evalquestions.update(question=F('question') + cd)\n # Create a new evalquestion for each evalassignment (if cd=1)\n # and inform that it has to be graded\n for evalassignment in Evalassignment.objects.filter(\n assignment__assignmentype=assignmentype):\n if cd == 1:\n Evalquestion.objects.create(\n evalassignment=evalassignment, question=question)\n evalassignment.reset_grade()\n elif cd == -1:\n evalassignment.grade_assignment = None\n evalassignment.save()\n # Add a question to the assignmentype\n assignmentype.nb_questions += cd\n if cd == 1:\n if assignmentype.questions_coeff:\n assignmentype.questions_coeff.insert(question - 1, None)\n if assignmentype.questions_statement:\n assignmentype.questions_statement.insert(question - 1,\n None)\n assignmentype.save()\n elif cd == -1:\n if assignmentype.questions_coeff:\n del assignmentype.questions_coeff[question - 1]\n if assignmentype.questions_statement:\n del assignmentype.questions_statement[question - 1]\n assignmentype.save()\n log = tasks.compute_grades_assignmentype(assignmentype.pk)\n logger.info(log)\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n form = classForm(nb_questions=assignmentype.nb_questions)\n context = {'assignmentype': assignmentype, 'form': form, 'info': info,\n 'cd': cd}\n return render(request, 'gradapp/insert_question.html', context)\n else:\n return redirect('gradapp:index')", "def add_an_assignment(cls):\n os.system('clear')\n while True:\n data = Ui.get_inputs(['Start date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'End date\\n\\tday(1-31): ', '\\tmonth(1-12): ', '\\tyear(2000+): ',\n 'Assignment name\\n\\t'], \"Please provide the assignment details: \\n\")\n try:\n start_date_day = int(data[0])\n start_date_month = int(data[1])\n start_date_year = int(data[2])\n end_date_day = int(data[3])\n end_date_month = int(data[4])\n end_date_year = int(data[5])\n name_of_assign = str(data[6])\n except ValueError:\n Ui.print_message(\"\\nDate must be an integer!\\n\\n\")\n break\n\n if start_date_day > 31 or start_date_day < 1:\n Ui.print_message('\\nStart day value is incorrect')\n else:\n if start_date_month > 12 or start_date_month < 1:\n Ui.print_message('\\nStart month value is incorrect')\n else:\n if start_date_year > 9999 or start_date_year < 2000:\n Ui.print_message('\\nStart year value is incorrect')\n else:\n if end_date_day > 31 or end_date_day < 1:\n Ui.print_message('\\nEnd day value is incorrect')\n else:\n if end_date_month > 12 or end_date_month < 
1:\n Ui.print_message('\\nEnd month value is incorrect')\n else:\n if end_date_year > 9999 or end_date_year < 1000:\n Ui.print_message('\\nEnd year value is incorrect')\n else:\n if len(name_of_assign) <= 1:\n Ui.print_message(\"\\nAssignment name have to be longer!\")\n else:\n list_of_names_of_assignments = []\n for i in Assignments.assignments_list:\n list_of_names_of_assignments.append(i.assignment_name)\n if name_of_assign in list_of_names_of_assignments:\n Ui.print_message(\"\\nAssignment name already exist, \"\n \"type another one!\")\n else:\n start_date = '{}-{}-{}'.format(start_date_year,\n start_date_month,\n start_date_day)\n end_date = '{}-{}-{}'.format(end_date_year,\n end_date_month,\n end_date_day)\n new_assignment = cls(start_date, end_date, name_of_assign)\n Assignments.assignments_list.append(new_assignment)\n Ui.print_message(\"\\nAssignment added!\\n\")\n Ui.get_inputs([''], \"Click enter to go back\")\n break # it stops the WHILE loop whenever passed information is incorrect, or assignment has been added", "def _enroll_students_in_course(self, course_id, num_students):\r\n\r\n for _ in range(num_students):\r\n random_id = uuid4().hex[:8]\r\n self.create_student(username='student{0}'.format(random_id))", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def create(self, validated_data):\n tags = validated_data.pop(\"tags\", [])\n attachments = validated_data.pop(\"attachments\", [])\n request_user = validated_data.pop(\"request_user\") # this should always be there\n agenda_create = validated_data.pop(\"agenda_create\", None)\n agenda_type = validated_data.pop(\"agenda_type\", None)\n agenda_parent_id = validated_data.pop(\"agenda_parent_id\", None)\n\n assignment = Assignment(**validated_data)\n if has_perm(request_user, \"agenda.can_manage\"):\n assignment.agenda_item_update_information[\"create\"] = agenda_create\n assignment.agenda_item_update_information[\"type\"] = agenda_type\n assignment.agenda_item_update_information[\"parent_id\"] = agenda_parent_id\n\n assignment.save()\n assignment.tags.add(*tags)\n assignment.attachments.add(*attachments)\n inform_changed_data(assignment)\n return assignment", "def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n students = [\r\n UserFactory.create(username='robot%d' % i, email='robot+test+%[email protected]' % i)\r\n for i in xrange(num_students)\r\n ]\r\n for student in students:\r\n CourseEnrollmentFactory.create(course_id=self.course.id, user=student)\r\n StudentModuleFactory.create(course_id=self.course.id,\r\n module_state_key=self.location,\r\n student=student,\r\n grade=grade,\r\n max_grade=max_grade,\r\n state=state)\r\n return students", "def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()", "def create_scaffold_assignments_table(self):\n log.info(\"Creating table to store Scaffold genus assignments ...\")\n self.create_table(self.ScaffoldsAssignmentsTable ,self.ScaffoldAssignmentsFields,\n self.ScaffoldAssignmentsTypes)", "def process_data(rows, options):\n cohort = int(options.cohort)\n id_expiry = PHASE_END_DATES[cohort][4]\n for row in 
rows:\n new_student = Person()\n for k, v in row.items():\n setattr(new_student, k, v)\n new_student.student_cohort = cohort\n new_student.id_expiry = id_expiry\n if options.section:\n new_student.student_sec_phase1 = options.section\n if options.add:\n new_student.save()\n print \"Saved:\", new_student", "def create_assignments(self, assignment_pattern, test_data):\n # First store how many assignments are already in the system,\n # so during the tests we can check the number of new assignments\n # created.\n test_data['initial_assignment_count'] = (\n len(self.assignment_api.list_role_assignments()))\n\n # Now create the new assignments in the test plan\n for assignment in assignment_pattern:\n # Each assignment is a dict of the form:\n #\n # { 'user': 0, 'project':1, 'role': 6}\n #\n # where the value of each item is the index into the array of\n # entities created earlier.\n #\n # We process the assignment dict to create the args required to\n # make the create_grant() call.\n args = {}\n for param in assignment:\n if param == 'inherited_to_projects':\n args[param] = assignment[param]\n else:\n # Turn 'entity : 0' into 'entity_id = ac6736ba873d'\n # where entity in user, group, project or domain\n key, value = self._convert_entity_shorthand(\n param, assignment, test_data)\n args[key] = value\n self.assignment_api.create_grant(**args)\n return test_data", "def register_student(self, **fields):\n if 'student_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('students')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n if 'UID' not in needed_fields.keys():\n needed_fields['UID'] = needed_fields['student_key']\n check = Students.get_or_none(student_key=needed_fields['student_key'])\n if check is not None:\n return check\n dummy_parent = Parents.get(parent_key=fields['parent']) if 'parent' in fields else Parents.get(parent_key='0')\n new_student = Students.get_or_create(parent=dummy_parent, **needed_fields)\n return new_student[0]", "def create_student_accounts(self):\r\n # Create student accounts and activate them.\r\n for i in range(len(self.STUDENT_INFO)):\r\n email, password = self.STUDENT_INFO[i]\r\n username = 'u{0}'.format(i)\r\n self.create_account(username, email, password)\r\n self.activate_user(email)", "def statement_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = StatementForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_statement =\\\n [form.cleaned_data['statement_%s' % i]\n for i in range(1, assignmentype.nb_questions + 1)]\n assignmentype.save()\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_statement = assignmentype.questions_statement\n statement = {}\n if questions_statement:\n for i in range(1, nb_questions + 1):\n statement['statement_%s' % i] =\\\n assignmentype.questions_statement[i - 1]\n else:\n statement = dict.fromkeys(['statement_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = StatementForm(nb_questions=nb_questions,\n initial=statement)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/statement_assignmentype.html',\n context)\n return 
redirect('gradapp:list_assignmentypes_running')", "def add_assignment(cls, mentor_id, title, start_date, end_date, file_name, group='0'):\n new = Assignment(mentor_id, title, start_date, end_date, file_name, group)\n db.session.add(new)\n db.session.commit()", "def create(self, validated_data):\n responses_data = validated_data.pop('student_responses')\n student = Student.objects.create(**validated_data)\n for response_data in responses_data:\n StudentResponsePrototype.objects.create(student=student, **response_data)\n\n return student", "def add_students(self):\n if(self.Roll_No_var.get()==\"\" or self.name_var.get()==\"\" or self.gender_var.get()==\"\" or self.contact_var.get()==\"\" or self.email_var.get()==\"\" or self.dob_var.get()==\"\" or self.pref1.get()==\"\" or self.pref2.get()==\"\" or self.pref3.get()==\"\" or self.type.get()==\"\" or self.rank.get()==\"\" or self.marks.get()==\"\"):\n messagebox.showerror(\"Error\",\"All fields are Required!!\")\n else:\n con = pymysql.connect(host=\"localhost\",user=\"root\",password=\"mysqlrootpasswordhere\",database=\"demodb\")\n cursor = con.cursor()\n try:\n cursor.execute(\"insert into STDB values(%s,%s,%s,%s,%s,%s,%s,%s)\",(self.Roll_No_var.get(),\n self.name_var.get(),\n self.email_var.get(),\n self.gender_var.get(),\n self.contact_var.get(),\n self.dob_var.get(),\n self.txt_Address.get(\"1.0\",END),\n self.stud_id_var.get()\n ))\n cursor.execute(\"insert into preference values(%s,%s,%s,%s)\",(self.Roll_No_var.get(),\n self.pref1.get(),\n self.pref2.get(),\n self.pref3.get()))\n cursor.execute(\"insert into qualification values(%s,%s,%s,%s)\",(self.Roll_No_var.get(),\n self.type.get(),\n self.rank.get(),\n self.marks.get()))\n \n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n messagebox.showinfo(\"Success\",\"Record has been Inserted\")\n except pymysql.err.InternalError:\n messagebox.showerror(\"Error\",\"App_id,Qualification rank , Qualification Marks Should be Integer\")\n self.Roll_No_var.set(\"\")\n self.rank.set(\"\")\n self.marks.set(\"\")\n except pymysql.err.IntegrityError:\n messagebox.showerror(\"Error\",\"Application Id is Already Exist Use Different Application Id\")\n self.Roll_No_var.set(\"\")", "def supereval_assignment(request, assignment_pk, i):\n assignment = Assignment.objects.get(id=assignment_pk)\n evalassignment = Evalassignment.objects.filter(assignment=assignment,\n is_supereval=True).first()\n redirect_url = ('/detail_assignmentype/%s/#assignment_%s' %\n (assignment.assignmentype.id, assignment.id))\n if not evalassignment:\n evalassignment = Evalassignment(evaluator=request.user,\n assignment=assignment)\n evalassignment.is_supereval = True\n evalassignment.save()\n for iq in range(assignment.assignmentype.nb_questions):\n Evalquestion.objects.create(evalassignment=evalassignment,\n question=(iq + 1))\n context = base_eval_assignment(request, evalassignment, i,\n '/supereval_assignment/%s/%s/' %\n (assignment_pk, i),\n redirect_url)\n if context:\n return render(request, 'gradapp/evalassignment_form.html', context)\n else:\n return redirect(redirect_url)", "def add_students_to_table(students):\n for student_name, points in students.items():\n try:\n Student.create(username=student_name, points=points)\n except IntegrityError: # IntError occurs when username already exists\n student_record = Student.get(username=student_name)\n if student_record.points != points:\n student_record.points = points\n student_record.save()", "def eval_evalassignment(request, pk, pts):\n student = 
request.user.student\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, assignment__student=student).first()\n if evalassignment:\n evalassignment.grade_evaluation = pts\n evalassignment.save()\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(\n UserSerializer(), validated_data=user_data)\n student, created = Profile.objects.update_or_create(\n user=user, contact_number=validated_data.pop('contact_number'), meter_id=validated_data.pop('meter_id'))\n return student", "def assign_classes(G, school_type, class_size, N_classes, N_floors):\n\t# ages that are taught in the given school type\n\tage_bracket = get_age_bracket(school_type)\n\t# distribution of classes over the available floors and neighborhood \n\t# relations of classes based on spatial proximity\n\tfloors, floors_inv = get_floor_distribution(N_floors, N_classes)\n\n\tall_students = {age:[] for age in age_bracket}\n\tclass_counter = 1\n\tsequential_students = []\n\t_, N_weekdays, weekend_days = get_teaching_framework()\n\n\tfor age in age_bracket:\n\t\t# get all student nodes with one age\n\t\tall_students[age] = [n[0] for n in G.nodes(data=True) if \\\n\t\t\t\t\t\t\t n[1]['type'] == 'student' and n[1]['age'] == age]\n\t\tsequential_students.extend(all_students[age])\n\n\t\t# split the students of the same ages into classes of size class_size\n\t\tfor i in range(int(len(all_students[age]) / class_size)):\n\t\t\tstudents_in_class = all_students[age][\\\n\t\t\t\ti * class_size: (i + 1) * class_size]\n\n\t\t\tfor s in students_in_class:\n\t\t\t\t# add class information to student node in graph\n\t\t\t\tnx.set_node_attributes(G, {s:{\n\t\t\t\t\t\t\t\t'unit':'class_{}'.format(class_counter),\n\t\t\t\t\t\t\t\t'floor':floors_inv[class_counter]}})\n\n\t\t\tclass_counter += 1\n\n\t# relabel nodes, making sure the student node label increases sequentially\n\t# from class 1 to class N\n\tnew_student_IDs = {s:'s{:04d}'.format(i + 1) for i, s in \\\n\t\tenumerate(sequential_students)}\n\tnx.relabel_nodes(G, new_student_IDs, copy=False)", "def add_student(self, student):\n if student in self.students:\n raise ValueError('Duplicate Student.')\n self.students.append(student)\n self.grades[student.id] = []\n self.is_sorted = False", "def load_assignments(course: Course, assignments: List[Dict]) -> List[Assignment]:\n logger.info(\"Creating %s assignments via Canvas API\", len(assignments))\n\n result: List[Assignment] = []\n for assignment in assignments:\n result.append(course.create_assignment(assignment))\n\n logger.info(\"Successfully created %s assignments\", len(assignments))\n return result", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def create_student_and_isa(schema: Dict[Any, Any]) -> ISA:\n student = User.get_user_by_email(schema[\"client\"][\"email\"])\n if not student:\n student = Student.create_user(**schema[\"client\"])\n schema.pop(\"client\")\n schema[\"student_id\"] = student.id\n\n isa = ISA.create_isa(**schema)\n return isa", "def approve(self):\n if 
(self.status == self.APPROVED):\n pass\n\n print ('starting approval process by adding events to the primary cal')\n\n primary_calendar = self.course.calendar_courses.get(primary=True)\n # print ('primary = ' + primary_calendar)\n for event in self.events.all():\n d = event.date\n start = datetime.datetime(d.year, d.month, d.day)\n start = timezone.make_aware(start, timezone.get_current_timezone())\n start = start + datetime.timedelta(hours=8)\n end = start + datetime.timedelta(hours=1)\n\n params = {\n 'calendar': primary_calendar,\n 'title': event.title,\n 'start': start,\n 'end': end\n }\n CalendarEvent.objects.create(**params)\n event.approved = True\n event.save()\n\n print ('trying to set syllabus to approved')\n\n try:\n syllabus = self.syllabus.all()[0]\n syllabus.approved = True\n syllabus.course = self.course\n syllabus.save()\n except:\n print ('dang, that failed, but continuing nonetheless.')\n pass\n\n\n print ('creating students from roster-students')\n\n\n for student in self.students.all():\n email = student.email\n if email:\n user = utils.get_or_create_user(email, student.first_name, student.last_name)\n school = self.course.domain\n user_student = utils.get_or_create_student(school, user)\n\n self.course.enroll_by_roster(user_student, self)\n\n student.approved = True\n student.save()\n\n print ('instructors')\n\n for instructor in self.instructors.all():\n instructor.approved = True\n instructor.save()\n\n print ('approving done')\n\n\n self.status = self.APPROVED\n self.save()\n\n add_notification(\n self.created_by.user,\n 'Your class set for {}, is approved and published!'.format(self.course)\n )" ]
[ "0.69684607", "0.6521637", "0.629256", "0.6040038", "0.5897248", "0.58499175", "0.5832194", "0.56943554", "0.5614065", "0.5535974", "0.55257034", "0.547506", "0.54501027", "0.5429327", "0.53555715", "0.5349613", "0.5298238", "0.5270237", "0.5244102", "0.52330476", "0.5217538", "0.52124214", "0.5196098", "0.5190959", "0.51866525", "0.51771414", "0.5159002", "0.5158999", "0.5154546", "0.51533854" ]
0.79302144
0
List all running (archived=False) assignmentypes
def list_assignmentypes_running(request): prof = request.user.prof context = {'type_assignmentype': 'running', 'prof': prof} context['list_assignmentypes'] = Assignmentype.objects.\ filter(archived=False, prof=prof).order_by('deadline_submission') return render(request, 'gradapp/list_assignmentype.html', context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_assignmentypes_archived(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'archived', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=True, prof=prof)\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def payable() -> List[str]:\n return [\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.SOFT_REJECTED,\n ]", "def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()", "def get_archieve(self):\n all_tasks = self.task_controller.get_list()\n return [task for task in all_tasks if task.is_completed == Status.DONE]", "def get_assignments(self) -> List :\n return self.assignments", "def valid() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.MIXED,\n AssignmentState.REJECTED,\n AssignmentState.SOFT_REJECTED,\n AssignmentState.EXPIRED,\n ]", "def list_assignments(state):\n # TODO: Add support for single-farm listing\n # TODO: also potentially do this by invoking the update cmd\n click.echo(\"Updating farms...\")\n state.farm_manager.update_all_farms()\n click.echo(\"Successfully updated all farms.\\n\")\n\n click.echo(\"Available assignments:\")\n assignments = state.farm_manager.list_farm_assignments()\n click.echo(\"\\n\".join([\"%s: %s\" % x for x in assignments]))", "def worklist():\n from wheelcms_axle.content import Content\n pending = Content.objects.filter(state=\"pending\", node__isnull=False)\n return pending", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def incomplete() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n ]", "def runs_list(years, runs):\n\thr_runs = []\n\tfor run in runs:\n\t\tif run[0:4] in years and \"Run\" in run:\n\t\t\tcheck = pc.read_file(\"/Users/mchronert/Desktop/activities/\"+run)\n\t\t\tif \"hr\" in check:\n\t\t\t\thr_runs.append(run)\n\treturn hr_runs", "def get_all_running(self) -> List[DocumentReference]:\n return self.get_all_documents(Type._RUNNING)", "def all_statuses(cls):\n return list(cls.pipeline.keys())", "def all_statuses(cls):\n return list(cls.pipeline.keys())", "def pass_assign_for_student(cls):\n today = datetime.date.today()\n assignments_list = Assignment.query.filter(Assignment.START_DATA <= today).all()\n return assignments_list", "def activemodes(self):\n\t\tret_active = []\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tret_active.extend( val.active() )\n\t\treturn ret_active", "async def running(self) -> list[dict[str, Any]]:\n data = await self.controller.request(\"get\", \"watering/program\")\n return cast(list[dict[str, Any]], data[\"programs\"])", "def get_all(self):\n return gnome_sort(self.__assignments, sort_function=lambda assignment_a, assignment_b: assignment_a.get_assignment_id() <= assignment_b.get_assignment_id())", "def all():\n schedule = Scheduler()\n schedule.committees()\n schedule.legislators()\n schedule.bills()", "def running(cls, query_set=None):\n if query_set is None:\n query_set = cls.objects_visible.all()\n return filter(lambda s: s.state.is_running, query_set)", "def list(self):\n self.background_scheduler.print_jobs()", "def list_model_runs(self):\n return sorted([x[\"name\"] for x in self._store.read_model_runs()])", "def 
runs(self):\n if experiment_info.name2id(self.exp):\n runs_list = experiment_info.experiment_runs(self.instrument.upper(),self.exp)\n for item in runs_list:\n runnum = item['num']\n item['xtc_files'] = glob('{:}/*-r{:04d}*.xtc'.format(\n self.xtc_dir,runnum)) \n item['h5_files'] = glob('{:}/*-r{:04d}*.h5'.format(\n self.h5_dir,runnum)) \n else:\n runs_list = []\n\n return runs_list", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def test_list_runs(self):\n pass", "def get_runs(self):\n if not id:\n return None\n \n query = \"SELECT run_id, name, result_state, run_finish_time_str FROM task_history;\"\n \n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n\n col = [['run_id'], ['name'], ['result_state'], ['run_finish_time_str']]\n runs = [Run(col, run) for run in cur.fetchall()]\n \n return runs", "def concrete():\n return list(x for x in TaskScope if x is not TaskScope.GLOBAL)", "def list_runs(arn=None, nextToken=None):\n pass", "def valid_unit() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.REJECTED,\n AssignmentState.SOFT_REJECTED,\n AssignmentState.EXPIRED,\n ]", "def read_all_pram(self):\n return self.PRAM" ]
[ "0.61839694", "0.5991135", "0.5889496", "0.57069176", "0.5693469", "0.56740314", "0.54603916", "0.544857", "0.54450536", "0.5431521", "0.54246765", "0.5392473", "0.53711426", "0.53711426", "0.5344439", "0.5297789", "0.5296468", "0.5291008", "0.5255987", "0.524345", "0.5240108", "0.523844", "0.52292186", "0.5220947", "0.52078724", "0.5202214", "0.51939297", "0.51908845", "0.5175229", "0.5166651" ]
0.6715551
0
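The listing view in this record and the archived variant in the next one filter Assignmentype on its archived flag and owning prof, and order running assignments by deadline_submission. A minimal sketch of the model fields those querysets assume; the field names are taken from the calls above, while everything else (the Prof foreign key target, defaults) is assumed for illustration.

from django.db import models

class Assignmentype(models.Model):
    # owner of the assignment, reached via request.user.prof in the views
    prof = models.ForeignKey('Prof', on_delete=models.CASCADE)
    # filter(archived=False) lists running assignments, archived=True the rest
    archived = models.BooleanField(default=False)
    # running assignments are ordered by this field
    deadline_submission = models.DateTimeField()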
List all archived assignmentype
def list_assignmentypes_archived(request):
    prof = request.user.prof
    context = {'type_assignmentype': 'archived', 'prof': prof}
    context['list_assignmentypes'] = Assignmentype.objects.\
        filter(archived=True, prof=prof)
    return render(request, 'gradapp/list_assignmentype.html',
                  context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def archive_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n assignmentype.archived = True\n assignmentype.save()\n return redirect('gradapp:list_assignmentypes_archived')\n else:\n return redirect('gradapp:index')", "def get_archive_items_for_assignment(self, assignment):\n\n query = {\n 'query': {\n 'filtered': {\n 'filter': {\n 'bool': {\n 'must': {\n 'term': {'assignment_id': str(assignment[config.ID_FIELD])}\n },\n }\n }\n }\n }\n }\n\n req = ParsedRequest()\n repos = 'archive,published,archived'\n req.args = {'source': json.dumps(query), 'repo': repos}\n return get_resource_service('search').get(req=req, lookup=None)", "def get_assignments(self) -> List :\n return self.assignments", "def archive_list(self) -> List[str]:\n bucket = self.client()\n results = []\n for item in bucket.objects.all():\n if (\n item.key.endswith(\".arcd\") or item.key.endswith(\".arcd.gpg\")\n ) and \"meta\" not in item.key:\n results.append(item.key.split(\".\", 1)[0])\n return results", "def list_assignments(state):\n # TODO: Add support for single-farm listing\n # TODO: also potentially do this by invoking the update cmd\n click.echo(\"Updating farms...\")\n state.farm_manager.update_all_farms()\n click.echo(\"Successfully updated all farms.\\n\")\n\n click.echo(\"Available assignments:\")\n assignments = state.farm_manager.list_farm_assignments()\n click.echo(\"\\n\".join([\"%s: %s\" % x for x in assignments]))", "def view_assignment_list():\n\n if len(Assignments.assignments_list) == 0:\n Ui.print_message(\"Assignment list is empty\")\n else:\n Ui.print_assignments_list(Assignments.assignments_list, \"Assignments List:\")", "def archive_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"archive_ids\")", "def get_assignment_files(self):\n files = list()\n \n #Make sure assignment file directory exists\n if not os.path.exists(self.get_assignment_path()):\n return files\n \n for f in os.listdir(self.get_assignment_path()):\n if os.path.isfile(os.path.join(self.get_assignment_path(), f)):\n info = os.stat(os.path.join(self.get_assignment_path(), f))\n files.append((f, int(info[6]), datetime.fromtimestamp(info[9])))\n \n return files", "def pass_assign_for_mentor(cls):\n assignments_list = cls.list_from_sql()\n return assignments_list", "def get_all(self):\n return gnome_sort(self.__assignments, sort_function=lambda assignment_a, assignment_b: assignment_a.get_assignment_id() <= assignment_b.get_assignment_id())", "def list_assignmentypes_running(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'running', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=False, prof=prof).order_by('deadline_submission')\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def contentAll(groupId, channelId):\n group = db.Group.find_one_or_404({\"_id\": ObjectId(groupId)})\n assignments = [\n db.Assignment.find({\"_id\": ObjectId(assignmentId)})\n for assignmentId in group[\"assignmentIds\"]\n ]\n data = [\n {\n \"assignmentId\": assignment[\"_id\"],\n \"name\": assignment[\"name\"],\n \"dis\": assignment[\"dis\"],\n \"maxGrade\": assignment[\"maxGrade\"],\n \"startDate\": assignment[\"startDate\"],\n \"dueDate\": assignment[\"dueDate\"],\n \"type\": assignment[\"type\"],\n \"url\": assignment[\"url\"],\n }\n for assignment in assignments\n ]\n return dumps(data), 200", "def get_acls():\n return config.get_cfg_storage(ID_ACL)", "def 
create_objects_list_from_database(cls, table_name): # from database\n conn = sqlite3.connect(\"database.db\")\n c = conn.cursor()\n name_q = \"SELECT start_date, end_date, name FROM assignements;\"\n name_db = c.execute(name_q)\n conn.commit()\n\n assignments_list = []\n\n for row in name_db:\n start_date = row[0]\n end_date = row[1]\n name = row[2]\n\n full_name = cls(start_date, end_date, name)\n assignments_list.append(full_name)\n conn.close()\n return assignments_list", "def archives(self, **kwargs):\n return self.session.listArchives(**kwargs)", "def ListArchives(self):\n return sorted(\n [name for name in os.listdir(self._root)\n if os.path.isdir(os.path.join(self._root, name))])", "def getArchivoVotacion():", "def render_archives():\n\n\tq = \"SELECT title, text, id, project FROM entries WHERE archived=1 ORDER BY id desc\"\n\tcur = g.db.execute(q)\n\trows = cur.fetchall()\n\tentries = [dict(\n\t\t\ttitle=row[0], \n\t\t\ttext=row[1], \n\t\t\tid=row[2], \n\t\t\tproject=row[3]) for row in rows]\n\n\t\"\"\" filter catagories as to not repeat \"\"\"\n\tfiltered_catagories = set([ x[3] for x in rows ])\n\n\treturn render_template('show_entries.html', \n\t\tentries=entries, \n\t\tcatagories=filtered_catagories,\n\t\tfiltered=False,\n\t\tarchived=True,\n\t\t)", "def get_all_avps_contents(self):\n return b\"\".join([avp.dump() for avp in self.avp_list])", "def archives(backend_name):\n output_format = \"%-7s\"\n backend = get_backend(backend_name)\n click.secho(output_format % (\"ID\",), fg=\"cyan\")\n for archive_id in sorted(backend.archive_list()):\n # Print it out\n click.echo(output_format % (archive_id,))", "def list(self) -> List[Organisation]:\n ...", "def _enchance_assignment(self, doc):\n\n results = self.get_archive_items_for_assignment(doc)\n if results.count() > 0:\n doc['item_ids'] = [str(item.get(config.ID_FIELD)) for item in results]\n\n self.set_type(doc, doc)", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def incomplete() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n ]", "def show_all_amenities():\n\n amenities = storage.all(Amenity).values()\n new_list = []\n for amenity in amenities:\n new_list.append(amenity.to_dict())\n return jsonify(new_list)", "def test_archived(self):\n doc = DocumentFactory(title=u'impalas', locale=u'en-US', is_archived=True)\n ApprovedRevisionFactory(document=doc, summary=u'impalas', is_approved=True)\n\n self.refresh()\n\n # include_archived gets the above document\n qs = {'q': 'impalas', 'a': 1, 'w': 1, 'format': 'json',\n 'include_archived': 'on'}\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_(1, len(results))\n\n # no include_archived gets you nothing since the only\n # document in the index is archived\n qs = {'q': 'impalas', 'a': 0, 'w': 1, 'format': 'json'}\n response = self.client.get(reverse('search.advanced'), qs)\n results = json.loads(response.content)['results']\n eq_(0, len(results))", "def _list(self):\n\n files = self.read_all_pages(\n self.metadata_url + 'nodes/' + self.backup_target_id +\n '/children?filters=kind:FILE')\n\n self.names_to_ids = {f['name']: f['id'] for f in files}\n\n return self.names_to_ids.keys()", "def amenity_all():\n state_holder = []\n for state in models.storage.all(\"Amenity\").values():\n state_holder.append(state.to_dict())\n return_holder = 
jsonify(state_holder)\n return return_holder", "def exportAovs(self):\n\t\taovs = mc.ls( typ = 'aiAOV' )\n\t\taovData = {}\n\t\tfor a in aovs:\n\t\t\taovData[a] = {}\n\t\t\taovData[a]['enabled'] = mc.getAttr( a + '.enabled' )\n\t\t\taovData[a]['name'] = mc.getAttr( a + '.name' )\n\t\t\taovData[a]['type'] = mc.getAttr( a + '.type' )\n\t\tpickle.dump( aovData, open( self.aovsPath.path, \"wb\" ) )", "def payable() -> List[str]:\n return [\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.SOFT_REJECTED,\n ]" ]
[ "0.646818", "0.5811683", "0.56811064", "0.55502814", "0.5527522", "0.5507093", "0.53576213", "0.53274935", "0.528445", "0.5195552", "0.5107243", "0.51069534", "0.50939465", "0.50938594", "0.50767183", "0.507538", "0.5065023", "0.50611526", "0.5055497", "0.50339127", "0.50115436", "0.50026405", "0.50019515", "0.49944708", "0.4986222", "0.49791557", "0.49632016", "0.4954626", "0.49368578", "0.49342418" ]
0.7493907
0
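Each record in this dump pairs a natural-language query with a positive document and a ranked list of negatives, and the metadata declares a triplet objective over (query, document, negatives). A hedged sketch of turning one record into training triplets; only the field names come from the dump, the loading code and the choice to keep the highest-scoring negatives are assumed.

def to_triplets(record, max_negatives=5):
    query = record["query"]
    positive = record["document"]
    # pair each negative with its score and keep the highest-scoring
    # (i.e. hardest) negatives first
    ranked = sorted(
        zip(record["negatives"], map(float, record["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(query, positive, negative) for negative, _ in ranked[:max_negatives]]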
Dashboard of an assignmentype (id=pk)
def detail_assignmentype(request, pk):
    prof = request.user.prof
    context = {'prof': prof}
    assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()
    assignments = assignmentype.assignment_set.\
        annotate(std=StdDev('evalassignment__grade_assignment'),
                 mean=Avg('evalassignment__grade_assignment'))
    if assignmentype:
        context['assignmentype'] = assignmentype
        context['assignments'] = assignments
        context['range_grades'] = range(assignmentype.nb_grading)
        return render(request, 'gradapp/detail_assignmentype.html', context)
    else:
        return redirect('gradapp:list_assignmentypes_running')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def public_visuals_assignment_id(assignment_id: str):\n\n # Get the assignment object\n assignment = Assignment.query.filter(\n Assignment.id == assignment_id\n ).first()\n\n # If the assignment does not exist, then stop\n req_assert(assignment is not None, message='assignment does not exist')\n\n # Assert that the assignment is within the course context\n assert_course_context(assignment)\n\n # Generate and pass back the visual data\n return success_response({\n 'assignment_data': get_admin_assignment_visual_data(\n assignment_id\n )\n })", "def archive_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n assignmentype.archived = True\n assignmentype.save()\n return redirect('gradapp:list_assignmentypes_archived')\n else:\n return redirect('gradapp:index')", "def statement_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = StatementForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_statement =\\\n [form.cleaned_data['statement_%s' % i]\n for i in range(1, assignmentype.nb_questions + 1)]\n assignmentype.save()\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_statement = assignmentype.questions_statement\n statement = {}\n if questions_statement:\n for i in range(1, nb_questions + 1):\n statement['statement_%s' % i] =\\\n assignmentype.questions_statement[i - 1]\n else:\n statement = dict.fromkeys(['statement_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = StatementForm(nb_questions=nb_questions,\n initial=statement)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/statement_assignmentype.html',\n context)\n return redirect('gradapp:list_assignmentypes_running')", "def admin_dashboard(request):\n data = request.GET.copy()\n if \"assigned_to\" not in data:\n data[\"assigned_to\"] = request.user.id\n assignment_form = AssignmentForm(data)\n assigned_to: Optional[Person] = None\n if assignment_form.is_valid():\n assigned_to = assignment_form.cleaned_data[\"assigned_to\"]\n\n current_events = Event.objects.current_events().prefetch_related(\"tags\")\n\n # This annotation may produce wrong number of instructors when\n # `unpublished_events` filters out events that contain a specific tag.\n # The bug was fixed in #1130.\n unpublished_events = (\n Event.objects.active()\n .unpublished_events()\n .select_related(\"host\")\n .annotate(\n num_instructors=Count(\n Case(\n When(task__role__name=\"instructor\", then=Value(1)),\n output_field=IntegerField(),\n )\n ),\n )\n .order_by(\"-start\")\n )\n\n # assigned events that have unaccepted changes\n updated_metadata = Event.objects.active().filter(metadata_changed=True)\n\n current_events = current_events.filter(assigned_to=assigned_to)\n unpublished_events = unpublished_events.filter(assigned_to=assigned_to)\n updated_metadata = updated_metadata.filter(assigned_to=assigned_to)\n\n context = {\n \"title\": None,\n \"assignment_form\": assignment_form,\n \"assigned_to\": assigned_to,\n \"current_events\": current_events,\n \"unpublished_events\": unpublished_events,\n \"updated_metadata\": updated_metadata.count(),\n \"main_tags\": Tag.objects.main_tags(),\n }\n return render(request, 
\"dashboard/admin_dashboard.html\", context)", "def modify_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n if request.method == 'POST':\n form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n if form.is_valid():\n form.save()\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n else:\n form = LightAssignmentypeForm(instance=assignmentype)\n context = {}\n context['assignmentype_id'] = assignmentype.id\n context['message'] = 'Modify details of your assignment '\\\n '(keep current student list)'\n context['form'] = form\n context['type_post'] = 'modify'\n return render(request, 'gradapp/assignmentype_form.html', context)\n else:\n return redirect('gradapp:index')", "def assignment(self, request, pk=None):\n\n obj = self.get_object()\n obj_mapping = {\n 'teacher': obj\n }\n try:\n user = self.request.user\n query = models.Assignment.objects.filter(\n subject__teacher__user=user,\n subject=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.Assignment,\n pk=id,\n subject=obj\n )\n return self.filtering(request, q)\n\n self.actionhelper(request, query, obj_mapping)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def _get_dashboard_object(self):\n pass", "def view_assignment_list():\n\n if len(Assignments.assignments_list) == 0:\n Ui.print_message(\"Assignment list is empty\")\n else:\n Ui.print_assignments_list(Assignments.assignments_list, \"Assignments List:\")", "def list_assignmentypes_running(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'running', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=False, prof=prof).order_by('deadline_submission')\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def list_assignmentypes_archived(request):\n prof = request.user.prof\n context = {'type_assignmentype': 'archived', 'prof': prof}\n context['list_assignmentypes'] = Assignmentype.objects.\\\n filter(archived=True, prof=prof)\n return render(request, 'gradapp/list_assignmentype.html',\n context)", "def _assignment(info):\n\n return info.ui.context['object']", "def KLP_Show_Permissions(request, boundary_id, user_id):\n\n userObj = User.objects.get(pk=user_id) # get user object\n boundType_List = Boundary_Type.objects.all() # get all boundary types\n\n # get session value, if session is not set default value is 0\n\n try:\n sessionVal = int(request.session['session_sch_typ'])\n except:\n sessionVal = 0\n redUrl = '/list/%s/user/%s/permissions/' % (boundary_id, user_id)\n\n # get all assigned institutions to the user\n\n assignedInst = Institution.objects.select_related('boundary'\n ).filter(Q(boundary__id=boundary_id)\n | Q(boundary__parent__id=boundary_id)\n | Q(boundary__parent__parent__id=boundary_id),\n active=2).extra(where=['''schools_institution.id in (SELECT \"obj_id\" FROM \"public\".\"object_permissions_institution_perms\" WHERE \"user_id\" = '%s' AND \"Acess\" = '1')'''\n % user_id]).only('id', 'name', 'boundary'\n ).order_by('boundary', 'boundary__parent'\n , 'name')\n\n assignedInstIds = assignedInst.values_list('id', flat=True)\n\n # get unassigned institutions based on assigned institutions\n\n unAssignedInst = Institution.objects.select_related('boundary'\n ).filter(Q(boundary__id=boundary_id)\n | 
Q(boundary__parent__id=boundary_id)\n | Q(boundary__parent__parent__id=boundary_id),\n active=2).exclude(pk__in=assignedInstIds).only('id'\n , 'name', 'boundary').order_by('boundary',\n 'boundary__parent', 'name')\n\n # get all assigned assessment objects\n\n assignedpermObjects = \\\n UserAssessmentPermissions.objects.select_related('assessment',\n 'instituion'\n ).filter(Q(instituion__boundary__id=boundary_id)\n | Q(instituion__boundary__parent__id=boundary_id)\n | Q(instituion__boundary__parent__parent__id=boundary_id),\n user=userObj, access=True).defer('access'\n ).order_by('instituion__boundary',\n 'instituion__boundary__parent',\n 'instituion__name')\n\n unMapObjs = \\\n Assessment_StudentGroup_Association.objects.select_related('student_group'\n , 'assessment'\n ).filter(Q(student_group__institution__boundary__id=boundary_id)\n | Q(student_group__institution__boundary__parent__id=boundary_id)\n | Q(student_group__institution__boundary__parent__parent__id=boundary_id),\n active=2).defer('active'\n ).order_by('student_group__institution__boundary',\n 'student_group__institution__boundary__parent',\n 'student_group__institution__name')\n for assignedPermObj in assignedpermObjects:\n qsets = Q(assessment=assignedPermObj.assessment) \\\n & Q(student_group__institution=assignedPermObj.instituion)\n unMapObjs = unMapObjs.exclude(qsets)\n unMapList = unMapObjs.values_list('student_group__institution',\n 'assessment').distinct()\n\n # get all unassigned assessment objects\n\n qList = \\\n [Assessment_StudentGroup_Association.objects.select_related('student_group'\n , 'assessment'\n ).filter(student_group__institution__id=unMapVal[0],\n assessment__id=unMapVal[1]).defer('active')[0] for unMapVal in\n unMapList]\n\n return render_to_response('viewtemplates/show_permissions.html', {\n 'assignedInst': assignedInst,\n 'userId': user_id,\n 'userName': userObj.username,\n 'unAssignedInst': unAssignedInst,\n 'assignedpermObjects': assignedpermObjects,\n 'redUrl': redUrl,\n 'qList': qList,\n }, context_instance=RequestContext(request))", "def visual_sundial_assignment(assignment_id: str):\n # Get the assignment object\n assignment = Assignment.query.filter(\n Assignment.id == assignment_id\n ).first()\n\n # If the assignment does not exist, then stop\n req_assert(assignment is not None, message='assignment does not exist')\n\n # Assert that the assignment is within the view of\n # the current admin.\n assert_course_context(assignment)\n\n # Pull the (maybe cached) sundial data\n return success_response({'sundial': get_assignment_sundial(assignment.id)})", "def dashboard():", "def eval_assignment(request, pk, i):\n evalassignment = Evalassignment.objects.filter(evaluator=request.user,\n pk=pk).first()\n if evalassignment and evalassignment.assignment.assignmentype.\\\n deadline_submission < timezone.now():\n # if evalassignment exists and if it is after the submission deadline\n context = base_eval_assignment(\n request, evalassignment, i,\n '/eval_assignment/%s/%s/' % (evalassignment.id, i),\n '/dashboard_student/')\n if context:\n return render(request, 'gradapp/evalassignment_form.html', context)\n else:\n return redirect('/dashboard_student/#assignment%s' %\n evalassignment.assignment.id)\n else:\n # if evalassignment does not exist or before submission deadline\n if evalassignment:\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def eval_evalassignment(request, pk, pts):\n student = 
request.user.student\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, assignment__student=student).first()\n if evalassignment:\n evalassignment.grade_evaluation = pts\n evalassignment.save()\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def download_assignment_prof(request, pk):\n assignment = Assignment.objects.\\\n filter(pk=pk, assignmentype__prof__user=request.user).first()\n if assignment:\n filename = 'assign_%s.%s' % (assignment.student.user.username,\n assignment.document.name.split('.')[-1])\n response = HttpResponse(assignment.document,\n content_type='application/force_download')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n return response\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def assignments(self, request, pk=None):\n\n obj = self.get_object()\n try:\n queryset = models.Assignment.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(queryset, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Assignment,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def case_detail_assign_view(request, pk):\n issue = _get_issue(request, pk)\n serializer = IssueAssignmentSerializer(data=request.data, instance=issue)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"issue\": IssueDetailSerializer(issue).data})", "def create_assignmentype(request, assignmentype_id=None):\n prof = request.user.prof\n context = {}\n if assignmentype_id:\n assignmentype = Assignmentype.objects.get(id=assignmentype_id)\n message = 'Reset your assignment. 
You can upload a new student list, '\\\n 'but be aware that it will reset the assignment (all former work '\\\n 'will be lost!)'\n type_post = 'reset' # reset the assignmentype\n context['assignmentype_id'] = assignmentype.id\n else:\n assignmentype = None\n message = 'Create a new assignment!'\n type_post = 'create' # new assignmentype\n if request.method == 'POST':\n form = AssignmentypeForm(request.POST, request.FILES,\n instance=assignmentype)\n if form.is_valid():\n if (not assignmentype) and (Assignmentype.objects.filter(\n title=form.cleaned_data['title'])):\n context['error'] = 'Oups, this assignment title has \\\n already been used, change it!'\n else:\n new_assignmentype = form.save(commit=False)\n new_assignmentype.prof = prof\n new_assignmentype.save()\n # create folder where to upload assignments\n try:\n os.mkdir(os.path.join(settings.BASE_DIR,\n settings.MEDIA_ROOT, 'assignment_%s' %\n new_assignmentype.id))\n except FileExistsError:\n pass\n # get list students from csv file\n try:\n existing_students, new_students =\\\n tasks.get_students(new_assignmentype.list_students.path)\n # return page asking for agreement for creation of students\n request.session['existing_students'] = existing_students\n request.session['new_students'] = new_students\n request.session['assignmentype_pk'] = new_assignmentype.pk\n return redirect(\"gradapp:validate_assignmentype_students\")\n except Exception as e:\n logger.error(make_error_message(e))\n new_assignmentype.list_students = None\n new_assignmentype.save()\n # return update page of assignmentype\n return redirect('/reset_assignmentype/%s/' %\n new_assignmentype.pk)\n else:\n form = AssignmentypeForm(instance=assignmentype)\n context['message'] = message\n context['form'] = form\n context['type_post'] = type_post\n return render(request, 'gradapp/assignmentype_form.html', context)", "def coeff_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = CoeffForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_coeff = [form.cleaned_data['coeff_%s'\n % i] for i\n in range(1, assignmentype.\n nb_questions + 1)]\n assignmentype.save()\n # Compute all grades\n log = tasks.compute_grades_assignmentype(assignmentype.id)\n logger.error(log)\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_coeff = assignmentype.questions_coeff\n coeff = {}\n if questions_coeff:\n for i in range(1, nb_questions + 1):\n coeff['coeff_%s' % i] = assignmentype.questions_coeff[i - 1]\n else:\n coeff = dict.fromkeys(['coeff_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = CoeffForm(nb_questions=nb_questions,\n initial=coeff)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/coeff_assignmentype.html',\n context)\n return redirect('gradapp:list_assignmentypes_running')", "def dashboard(self):\r\n return {}", "def dashboard(self) -> api.Dashboard:\n return self._get_model(model=api.Dashboard)", "def insert_question_assignmentype(request, pk, cd):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n cd = int(cd)\n if cd == 1:\n classForm = AddQuestionForm\n info = 'Add'\n elif cd == -1:\n classForm = RemoveQuestionForm\n info = 'Remove'\n if assignmentype:\n if request.method == 'POST':\n form = 
classForm(request.POST,\n nb_questions=assignmentype.nb_questions)\n if form.is_valid():\n question = form.cleaned_data['question']\n # Modify attribute question of all associated evalquestion\n if cd == -1:\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question=question)\n evalquestions.delete()\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question__gte=question)\n evalquestions.update(question=F('question') + cd)\n # Create a new evalquestion for each evalassignment (if cd=1)\n # and inform that it has to be graded\n for evalassignment in Evalassignment.objects.filter(\n assignment__assignmentype=assignmentype):\n if cd == 1:\n Evalquestion.objects.create(\n evalassignment=evalassignment, question=question)\n evalassignment.reset_grade()\n elif cd == -1:\n evalassignment.grade_assignment = None\n evalassignment.save()\n # Add a question to the assignmentype\n assignmentype.nb_questions += cd\n if cd == 1:\n if assignmentype.questions_coeff:\n assignmentype.questions_coeff.insert(question - 1, None)\n if assignmentype.questions_statement:\n assignmentype.questions_statement.insert(question - 1,\n None)\n assignmentype.save()\n elif cd == -1:\n if assignmentype.questions_coeff:\n del assignmentype.questions_coeff[question - 1]\n if assignmentype.questions_statement:\n del assignmentype.questions_statement[question - 1]\n assignmentype.save()\n log = tasks.compute_grades_assignmentype(assignmentype.pk)\n logger.info(log)\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n form = classForm(nb_questions=assignmentype.nb_questions)\n context = {'assignmentype': assignmentype, 'form': form, 'info': info,\n 'cd': cd}\n return render(request, 'gradapp/insert_question.html', context)\n else:\n return redirect('gradapp:index')", "def canvas_api_assignments(state, course_id):\n\n api = state.canvas_api()\n for assign in api.list_assignments(course_id):\n click.echo(str(assign))", "def assignment_id(self):\n return self.assignment.AssignmentId", "def delete_assignmentype(request, pk, type_list):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n assignmentype.delete()\n if type_list == '1':\n return redirect('gradapp:list_assignmentypes_running')\n elif type_list == '0':\n return redirect('gradapp:list_assignmentypes_archived')\n else:\n return redirect('gradapp:index')", "def get_assignment_info(self):\n url = self.server_url + \"/api/v1/courses/\" + str(self.course_id) + '/assignments/' + str(self.assignment_id)\n r = requests.get(url, headers=self.headers, params=self.params)\n assignment = json.loads(r.text)\n return assignment", "def instructor_dashboard(request, course_id):\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_with_access(request.user, 'staff', course_key, depth=None)\r\n\r\n instructor_access = has_access(request.user, 'instructor', course) # an instructor can manage staff lists\r\n\r\n forum_admin_access = has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR)\r\n\r\n msg = ''\r\n email_msg = ''\r\n email_to_option = None\r\n email_subject = None\r\n html_message = ''\r\n show_email_tab = False\r\n problems = []\r\n plots = []\r\n datatable = {}\r\n\r\n # the instructor dashboard page is modal: grades, psychometrics, admin\r\n # keep that state in request.session (defaults to grades mode)\r\n idash_mode = 
request.POST.get('idash_mode', '')\r\n idash_mode_key = u'idash_mode:{0}'.format(course_id)\r\n if idash_mode:\r\n request.session[idash_mode_key] = idash_mode\r\n else:\r\n idash_mode = request.session.get(idash_mode_key, 'Grades')\r\n\r\n enrollment_number = CourseEnrollment.num_enrolled_in(course_key)\r\n\r\n # assemble some course statistics for output to instructor\r\n def get_course_stats_table():\r\n datatable = {\r\n 'header': ['Statistic', 'Value'],\r\n 'title': _('Course Statistics At A Glance'),\r\n }\r\n data = [['# Enrolled', enrollment_number]]\r\n data += [['Date', timezone.now().isoformat()]]\r\n data += compute_course_stats(course).items()\r\n if request.user.is_staff:\r\n for field in course.fields.values():\r\n if getattr(field.scope, 'user', False):\r\n continue\r\n\r\n data.append([\r\n field.name,\r\n json.dumps(field.read_json(course), cls=i4xEncoder)\r\n ])\r\n datatable['data'] = data\r\n return datatable\r\n\r\n def return_csv(func, datatable, file_pointer=None):\r\n \"\"\"Outputs a CSV file from the contents of a datatable.\"\"\"\r\n if file_pointer is None:\r\n response = HttpResponse(mimetype='text/csv')\r\n response['Content-Disposition'] = 'attachment; filename={0}'.format(func)\r\n else:\r\n response = file_pointer\r\n writer = csv.writer(response, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n encoded_row = [unicode(s).encode('utf-8') for s in datatable['header']]\r\n writer.writerow(encoded_row)\r\n for datarow in datatable['data']:\r\n # 's' here may be an integer, float (eg score) or string (eg student name)\r\n encoded_row = [\r\n # If s is already a UTF-8 string, trying to make a unicode\r\n # object out of it will fail unless we pass in an encoding to\r\n # the constructor. But we can't do that across the board,\r\n # because s is often a numeric type. So just do this.\r\n s if isinstance(s, str) else unicode(s).encode('utf-8')\r\n for s in datarow\r\n ]\r\n writer.writerow(encoded_row)\r\n return response\r\n\r\n def get_student_from_identifier(unique_student_identifier):\r\n \"\"\"Gets a student object using either an email address or username\"\"\"\r\n unique_student_identifier = strip_if_string(unique_student_identifier)\r\n msg = \"\"\r\n try:\r\n if \"@\" in unique_student_identifier:\r\n student = User.objects.get(email=unique_student_identifier)\r\n else:\r\n student = User.objects.get(username=unique_student_identifier)\r\n msg += _(\"Found a single student. 
\")\r\n except User.DoesNotExist:\r\n student = None\r\n msg += \"<font color='red'>{text}</font>\".format(\r\n text=_(\"Couldn't find student with that email or username.\")\r\n )\r\n return msg, student\r\n\r\n # process actions from form POST\r\n action = request.POST.get('action', '')\r\n use_offline = request.POST.get('use_offline_grades', False)\r\n\r\n if settings.FEATURES['ENABLE_MANUAL_GIT_RELOAD']:\r\n if 'GIT pull' in action:\r\n data_dir = course.data_dir\r\n log.debug('git pull {0}'.format(data_dir))\r\n gdir = settings.DATA_DIR / data_dir\r\n if not os.path.exists(gdir):\r\n msg += \"====> ERROR in gitreload - no such directory {0}\".format(gdir)\r\n else:\r\n cmd = \"cd {0}; git reset --hard HEAD; git clean -f -d; git pull origin; chmod g+w course.xml\".format(gdir)\r\n msg += \"git pull on {0}:<p>\".format(data_dir)\r\n msg += \"<pre>{0}</pre></p>\".format(escape(os.popen(cmd).read()))\r\n track.views.server_track(request, \"git-pull\", {\"directory\": data_dir}, page=\"idashboard\")\r\n\r\n if 'Reload course' in action:\r\n log.debug('reloading {0} ({1})'.format(course_key, course))\r\n try:\r\n data_dir = course.data_dir\r\n modulestore().try_load_course(data_dir)\r\n msg += \"<br/><p>Course reloaded from {0}</p>\".format(data_dir)\r\n track.views.server_track(request, \"reload\", {\"directory\": data_dir}, page=\"idashboard\")\r\n course_errors = modulestore().get_course_errors(course.id)\r\n msg += '<ul>'\r\n for cmsg, cerr in course_errors:\r\n msg += \"<li>{0}: <pre>{1}</pre>\".format(cmsg, escape(cerr))\r\n msg += '</ul>'\r\n except Exception as err: # pylint: disable=broad-except\r\n msg += '<br/><p>Error: {0}</p>'.format(escape(err))\r\n\r\n if action == 'Dump list of enrolled students' or action == 'List enrolled students':\r\n log.debug(action)\r\n datatable = get_student_grade_summary_data(request, course, get_grades=False, use_offline=use_offline)\r\n datatable['title'] = _('List of students enrolled in {course_key}').format(course_key=course_key.to_deprecated_string())\r\n track.views.server_track(request, \"list-students\", {}, page=\"idashboard\")\r\n\r\n elif 'Dump Grades' in action:\r\n log.debug(action)\r\n datatable = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline)\r\n datatable['title'] = _('Summary Grades of students enrolled in {course_key}').format(course_key=course_key.to_deprecated_string())\r\n track.views.server_track(request, \"dump-grades\", {}, page=\"idashboard\")\r\n\r\n elif 'Dump all RAW grades' in action:\r\n log.debug(action)\r\n datatable = get_student_grade_summary_data(request, course, get_grades=True,\r\n get_raw_scores=True, use_offline=use_offline)\r\n datatable['title'] = _('Raw Grades of students enrolled in {course_key}').format(course_key=course_key)\r\n track.views.server_track(request, \"dump-grades-raw\", {}, page=\"idashboard\")\r\n\r\n elif 'Download CSV of all student grades' in action:\r\n track.views.server_track(request, \"dump-grades-csv\", {}, page=\"idashboard\")\r\n return return_csv('grades_{0}.csv'.format(course_key.to_deprecated_string()),\r\n get_student_grade_summary_data(request, course, use_offline=use_offline))\r\n\r\n elif 'Download CSV of all RAW grades' in action:\r\n track.views.server_track(request, \"dump-grades-csv-raw\", {}, page=\"idashboard\")\r\n return return_csv('grades_{0}_raw.csv'.format(course_key.to_deprecated_string()),\r\n get_student_grade_summary_data(request, course, get_raw_scores=True, use_offline=use_offline))\r\n\r\n elif 'Download CSV of 
answer distributions' in action:\r\n track.views.server_track(request, \"dump-answer-dist-csv\", {}, page=\"idashboard\")\r\n return return_csv('answer_dist_{0}.csv'.format(course_key.to_deprecated_string()), get_answers_distribution(request, course_key))\r\n\r\n elif 'Dump description of graded assignments configuration' in action:\r\n # what is \"graded assignments configuration\"?\r\n track.views.server_track(request, \"dump-graded-assignments-config\", {}, page=\"idashboard\")\r\n msg += dump_grading_context(course)\r\n\r\n elif \"Rescore ALL students' problem submissions\" in action:\r\n problem_location_str = strip_if_string(request.POST.get('problem_for_all_students', ''))\r\n try:\r\n problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)\r\n instructor_task = submit_rescore_problem_for_all_students(request, problem_location)\r\n if instructor_task is None:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for rescoring \"{problem_url}\".').format(\r\n problem_url=problem_location_str\r\n )\r\n )\r\n else:\r\n track.views.server_track(\r\n request,\r\n \"rescore-all-submissions\",\r\n {\r\n \"problem\": problem_location_str,\r\n \"course\": course_key.to_deprecated_string()\r\n },\r\n page=\"idashboard\"\r\n )\r\n\r\n except (InvalidKeyError, ItemNotFoundError) as err:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for rescoring \"{problem_url}\": problem not found.').format(\r\n problem_url=problem_location_str\r\n )\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n log.error(\"Encountered exception from rescore: {0}\".format(err))\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for rescoring \"{url}\": {message}.').format(\r\n url=problem_location_str, message=err.message\r\n )\r\n )\r\n\r\n elif \"Reset ALL students' attempts\" in action:\r\n problem_location_str = strip_if_string(request.POST.get('problem_for_all_students', ''))\r\n try:\r\n problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)\r\n instructor_task = submit_reset_problem_attempts_for_all_students(request, problem_location)\r\n if instructor_task is None:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for resetting \"{problem_url}\".').format(problem_url=problem_location_str)\r\n )\r\n else:\r\n track.views.server_track(\r\n request,\r\n \"reset-all-attempts\",\r\n {\r\n \"problem\": problem_location_str,\r\n \"course\": course_key.to_deprecated_string()\r\n },\r\n page=\"idashboard\"\r\n )\r\n except (InvalidKeyError, ItemNotFoundError) as err:\r\n log.error('Failure to reset: unknown problem \"{0}\"'.format(err))\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for resetting \"{problem_url}\": problem not found.').format(\r\n problem_url=problem_location_str\r\n )\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n log.error(\"Encountered exception from reset: {0}\".format(err))\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for resetting \"{url}\": {message}.').format(\r\n url=problem_location_str, message=err.message\r\n )\r\n )\r\n\r\n elif \"Show Background Task History for Student\" in action:\r\n # put this before the non-student case, since the use of \"in\" will cause this to be 
missed\r\n unique_student_identifier = request.POST.get('unique_student_identifier', '')\r\n message, student = get_student_from_identifier(unique_student_identifier)\r\n if student is None:\r\n msg += message\r\n else:\r\n problem_location_str = strip_if_string(request.POST.get('problem_for_student', ''))\r\n try:\r\n problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)\r\n except InvalidKeyError:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Could not find problem location \"{url}\".').format(\r\n url=problem_location_str\r\n )\r\n )\r\n else:\r\n message, datatable = get_background_task_table(course_key, problem_location, student)\r\n msg += message\r\n\r\n elif \"Show Background Task History\" in action:\r\n problem_location = strip_if_string(request.POST.get('problem_for_all_students', ''))\r\n try:\r\n problem_location = course_key.make_usage_key_from_deprecated_string(problem_location_str)\r\n except InvalidKeyError:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Could not find problem location \"{url}\".').format(\r\n url=problem_location_str\r\n )\r\n )\r\n else:\r\n message, datatable = get_background_task_table(course_key, problem_location)\r\n msg += message\r\n\r\n elif (\"Reset student's attempts\" in action or\r\n \"Delete student state for module\" in action or\r\n \"Rescore student's problem submission\" in action):\r\n # get the form data\r\n unique_student_identifier = request.POST.get(\r\n 'unique_student_identifier', ''\r\n )\r\n problem_location_str = strip_if_string(request.POST.get('problem_for_student', ''))\r\n try:\r\n module_state_key = course_key.make_usage_key_from_deprecated_string(problem_location_str)\r\n except InvalidKeyError:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Could not find problem location \"{url}\".').format(\r\n url=problem_location_str\r\n )\r\n )\r\n else:\r\n # try to uniquely id student by email address or username\r\n message, student = get_student_from_identifier(unique_student_identifier)\r\n msg += message\r\n student_module = None\r\n if student is not None:\r\n # Reset the student's score in the submissions API\r\n # Currently this is used only by open assessment (ORA 2)\r\n # We need to do this *before* retrieving the `StudentModule` model,\r\n # because it's possible for a score to exist even if no student module exists.\r\n if \"Delete student state for module\" in action:\r\n try:\r\n sub_api.reset_score(\r\n anonymous_id_for_user(student, course_key),\r\n course_key.to_deprecated_string(),\r\n module_state_key.to_deprecated_string(),\r\n )\r\n except sub_api.SubmissionError:\r\n # Trust the submissions API to log the error\r\n error_msg = _(\"An error occurred while deleting the score.\")\r\n msg += \"<font color='red'>{err}</font> \".format(err=error_msg)\r\n\r\n # find the module in question\r\n try:\r\n student_module = StudentModule.objects.get(\r\n student_id=student.id,\r\n course_id=course_key,\r\n module_state_key=module_state_key\r\n )\r\n msg += _(\"Found module. \")\r\n\r\n except StudentModule.DoesNotExist as err:\r\n error_msg = _(\"Couldn't find module with that urlname: {url}. 
\").format(url=problem_location_str)\r\n msg += \"<font color='red'>{err_msg} ({err})</font>\".format(err_msg=error_msg, err=err)\r\n log.debug(error_msg)\r\n\r\n if student_module is not None:\r\n if \"Delete student state for module\" in action:\r\n # delete the state\r\n try:\r\n student_module.delete()\r\n\r\n msg += \"<font color='red'>{text}</font>\".format(\r\n text=_(\"Deleted student module state for {state}!\").format(state=module_state_key)\r\n )\r\n event = {\r\n \"problem\": problem_location_str,\r\n \"student\": unique_student_identifier,\r\n \"course\": course_key.to_deprecated_string()\r\n }\r\n track.views.server_track(\r\n request,\r\n \"delete-student-module-state\",\r\n event,\r\n page=\"idashboard\"\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n error_msg = _(\"Failed to delete module state for {id}/{url}. \").format(\r\n id=unique_student_identifier, url=problem_location_str\r\n )\r\n msg += \"<font color='red'>{err_msg} ({err})</font>\".format(err_msg=error_msg, err=err)\r\n log.exception(error_msg)\r\n elif \"Reset student's attempts\" in action:\r\n # modify the problem's state\r\n try:\r\n # load the state json\r\n problem_state = json.loads(student_module.state)\r\n old_number_of_attempts = problem_state[\"attempts\"]\r\n problem_state[\"attempts\"] = 0\r\n # save\r\n student_module.state = json.dumps(problem_state)\r\n student_module.save()\r\n event = {\r\n \"old_attempts\": old_number_of_attempts,\r\n \"student\": unicode(student),\r\n \"problem\": student_module.module_state_key,\r\n \"instructor\": unicode(request.user),\r\n \"course\": course_key.to_deprecated_string()\r\n }\r\n track.views.server_track(request, \"reset-student-attempts\", event, page=\"idashboard\")\r\n msg += \"<font color='green'>{text}</font>\".format(\r\n text=_(\"Module state successfully reset!\")\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n error_msg = _(\"Couldn't reset module state for {id}/{url}. 
\").format(\r\n id=unique_student_identifier, url=problem_location_str\r\n )\r\n msg += \"<font color='red'>{err_msg} ({err})</font>\".format(err_msg=error_msg, err=err)\r\n log.exception(error_msg)\r\n else:\r\n # \"Rescore student's problem submission\" case\r\n try:\r\n instructor_task = submit_rescore_problem_for_student(request, module_state_key, student)\r\n if instructor_task is None:\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for rescoring \"{key}\" for student {id}.').format(\r\n key=module_state_key, id=unique_student_identifier\r\n )\r\n )\r\n else:\r\n track.views.server_track(\r\n request,\r\n \"rescore-student-submission\",\r\n {\r\n \"problem\": module_state_key,\r\n \"student\": unique_student_identifier,\r\n \"course\": course_key.to_deprecated_string()\r\n },\r\n page=\"idashboard\"\r\n )\r\n except Exception as err: # pylint: disable=broad-except\r\n msg += '<font color=\"red\">{text}</font>'.format(\r\n text=_('Failed to create a background task for rescoring \"{key}\": {id}.').format(\r\n key=module_state_key, id=err.message\r\n )\r\n )\r\n log.exception(\"Encountered exception from rescore: student '{0}' problem '{1}'\".format(\r\n unique_student_identifier, module_state_key\r\n )\r\n )\r\n\r\n elif \"Get link to student's progress page\" in action:\r\n unique_student_identifier = request.POST.get('unique_student_identifier', '')\r\n # try to uniquely id student by email address or username\r\n message, student = get_student_from_identifier(unique_student_identifier)\r\n msg += message\r\n if student is not None:\r\n progress_url = reverse('student_progress', kwargs={\r\n 'course_id': course_key.to_deprecated_string(),\r\n 'student_id': student.id\r\n })\r\n track.views.server_track(\r\n request,\r\n \"get-student-progress-page\",\r\n {\r\n \"student\": unicode(student),\r\n \"instructor\": unicode(request.user),\r\n \"course\": course_key.to_deprecated_string()\r\n },\r\n page=\"idashboard\"\r\n )\r\n msg += \"<a href='{url}' target='_blank'>{text}</a>.\".format(\r\n url=progress_url,\r\n text=_(\"Progress page for username: {username} with email address: {email}\").format(\r\n username=student.username, email=student.email\r\n )\r\n )\r\n\r\n #----------------------------------------\r\n # export grades to remote gradebook\r\n\r\n elif action == 'List assignments available in remote gradebook':\r\n msg2, datatable = _do_remote_gradebook(request.user, course, 'get-assignments')\r\n msg += msg2\r\n\r\n elif action == 'List assignments available for this course':\r\n log.debug(action)\r\n allgrades = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline)\r\n\r\n assignments = [[x] for x in allgrades['assignments']]\r\n datatable = {'header': [_('Assignment Name')]}\r\n datatable['data'] = assignments\r\n datatable['title'] = action\r\n\r\n msg += 'assignments=<pre>%s</pre>' % assignments\r\n\r\n elif action == 'List enrolled students matching remote gradebook':\r\n stud_data = get_student_grade_summary_data(request, course, get_grades=False, use_offline=use_offline)\r\n msg2, rg_stud_data = _do_remote_gradebook(request.user, course, 'get-membership')\r\n datatable = {'header': ['Student email', 'Match?']}\r\n rg_students = [x['email'] for x in rg_stud_data['retdata']]\r\n\r\n def domatch(x):\r\n return 'yes' if x.email in rg_students else 'No'\r\n datatable['data'] = [[x.email, domatch(x)] for x in stud_data['students']]\r\n datatable['title'] = action\r\n\r\n elif action in 
['Display grades for assignment', 'Export grades for assignment to remote gradebook',\r\n 'Export CSV file of grades for assignment']:\r\n\r\n log.debug(action)\r\n datatable = {}\r\n aname = request.POST.get('assignment_name', '')\r\n if not aname:\r\n msg += \"<font color='red'>{text}</font>\".format(text=_(\"Please enter an assignment name\"))\r\n else:\r\n allgrades = get_student_grade_summary_data(request, course, get_grades=True, use_offline=use_offline)\r\n if aname not in allgrades['assignments']:\r\n msg += \"<font color='red'>{text}</font>\".format(\r\n text=_(\"Invalid assignment name '{name}'\").format(name=aname)\r\n )\r\n else:\r\n aidx = allgrades['assignments'].index(aname)\r\n datatable = {'header': [_('External email'), aname]}\r\n ddata = []\r\n for student in allgrades['students']: # do one by one in case there is a student who has only partial grades\r\n try:\r\n ddata.append([student.email, student.grades[aidx]])\r\n except IndexError:\r\n log.debug('No grade for assignment {idx} ({name}) for student {email}'.format(\r\n idx=aidx, name=aname, email=student.email)\r\n )\r\n datatable['data'] = ddata\r\n\r\n datatable['title'] = _('Grades for assignment \"{name}\"').format(name=aname)\r\n\r\n if 'Export CSV' in action:\r\n # generate and return CSV file\r\n return return_csv('grades {name}.csv'.format(name=aname), datatable)\r\n\r\n elif 'remote gradebook' in action:\r\n file_pointer = StringIO()\r\n return_csv('', datatable, file_pointer=file_pointer)\r\n file_pointer.seek(0)\r\n files = {'datafile': file_pointer}\r\n msg2, __ = _do_remote_gradebook(request.user, course, 'post-grades', files=files)\r\n msg += msg2\r\n\r\n #----------------------------------------\r\n # Admin\r\n\r\n elif 'List course staff' in action:\r\n role = CourseStaffRole(course.id)\r\n datatable = _role_members_table(role, _(\"List of Staff\"), course_key)\r\n track.views.server_track(request, \"list-staff\", {}, page=\"idashboard\")\r\n\r\n elif 'List course instructors' in action and GlobalStaff().has_user(request.user):\r\n role = CourseInstructorRole(course.id)\r\n datatable = _role_members_table(role, _(\"List of Instructors\"), course_key)\r\n track.views.server_track(request, \"list-instructors\", {}, page=\"idashboard\")\r\n\r\n elif action == 'Add course staff':\r\n uname = request.POST['staffuser']\r\n role = CourseStaffRole(course.id)\r\n msg += add_user_to_role(request, uname, role, 'staff', 'staff')\r\n\r\n elif action == 'Add instructor' and request.user.is_staff:\r\n uname = request.POST['instructor']\r\n role = CourseInstructorRole(course.id)\r\n msg += add_user_to_role(request, uname, role, 'instructor', 'instructor')\r\n\r\n elif action == 'Remove course staff':\r\n uname = request.POST['staffuser']\r\n role = CourseStaffRole(course.id)\r\n msg += remove_user_from_role(request, uname, role, 'staff', 'staff')\r\n\r\n elif action == 'Remove instructor' and request.user.is_staff:\r\n uname = request.POST['instructor']\r\n role = CourseInstructorRole(course.id)\r\n msg += remove_user_from_role(request, uname, role, 'instructor', 'instructor')\r\n\r\n #----------------------------------------\r\n # DataDump\r\n\r\n elif 'Download CSV of all student profile data' in action:\r\n enrolled_students = User.objects.filter(\r\n courseenrollment__course_id=course_key,\r\n courseenrollment__is_active=1,\r\n ).order_by('username').select_related(\"profile\")\r\n profkeys = ['name', 'language', 'location', 'year_of_birth', 'gender', 'level_of_education',\r\n 'mailing_address', 'goals']\r\n 
datatable = {'header': ['username', 'email'] + profkeys}\r\n\r\n def getdat(user):\r\n \"\"\"\r\n Return a list of profile data for the given user.\r\n \"\"\"\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]\r\n\r\n datatable['data'] = [getdat(u) for u in enrolled_students]\r\n datatable['title'] = _('Student profile data for course {course_id}').format(\r\n course_id=course_key.to_deprecated_string()\r\n )\r\n return return_csv(\r\n 'profiledata_{course_id}.csv'.format(course_id=course_key.to_deprecated_string()),\r\n datatable\r\n )\r\n\r\n elif 'Download CSV of all responses to problem' in action:\r\n problem_to_dump = request.POST.get('problem_to_dump', '')\r\n\r\n if problem_to_dump[-4:] == \".xml\":\r\n problem_to_dump = problem_to_dump[:-4]\r\n try:\r\n module_state_key = course_key.make_usage_key(block_type='problem', name=problem_to_dump)\r\n smdat = StudentModule.objects.filter(\r\n course_id=course_key,\r\n module_state_key=module_state_key\r\n )\r\n smdat = smdat.order_by('student')\r\n msg += _(\"Found {num} records to dump.\").format(num=smdat)\r\n except Exception as err: # pylint: disable=broad-except\r\n msg += \"<font color='red'>{text}</font><pre>{err}</pre>\".format(\r\n text=_(\"Couldn't find module with that urlname.\"),\r\n err=escape(err)\r\n )\r\n smdat = []\r\n\r\n if smdat:\r\n datatable = {'header': ['username', 'state']}\r\n datatable['data'] = [[x.student.username, x.state] for x in smdat]\r\n datatable['title'] = _('Student state for problem {problem}').format(problem=problem_to_dump)\r\n return return_csv('student_state_from_{problem}.csv'.format(problem=problem_to_dump), datatable)\r\n\r\n elif 'Download CSV of all student anonymized IDs' in action:\r\n students = User.objects.filter(\r\n courseenrollment__course_id=course_key,\r\n ).order_by('id')\r\n\r\n datatable = {'header': ['User ID', 'Anonymized user ID', 'Course Specific Anonymized user ID']}\r\n datatable['data'] = [[s.id, unique_id_for_user(s), anonymous_id_for_user(s, course_id)] for s in students]\r\n return return_csv(course_key.to_deprecated_string().replace('/', '-') + '-anon-ids.csv', datatable)\r\n\r\n #----------------------------------------\r\n # Group management\r\n\r\n elif 'List beta testers' in action:\r\n role = CourseBetaTesterRole(course.id)\r\n datatable = _role_members_table(role, _(\"List of Beta Testers\"), course_key)\r\n track.views.server_track(request, \"list-beta-testers\", {}, page=\"idashboard\")\r\n\r\n elif action == 'Add beta testers':\r\n users = request.POST['betausers']\r\n log.debug(\"users: {0!r}\".format(users))\r\n role = CourseBetaTesterRole(course.id)\r\n for username_or_email in split_by_comma_and_whitespace(users):\r\n msg += \"<p>{0}</p>\".format(\r\n add_user_to_role(request, username_or_email, role, 'beta testers', 'beta-tester'))\r\n\r\n elif action == 'Remove beta testers':\r\n users = request.POST['betausers']\r\n role = CourseBetaTesterRole(course.id)\r\n for username_or_email in split_by_comma_and_whitespace(users):\r\n msg += \"<p>{0}</p>\".format(\r\n remove_user_from_role(request, username_or_email, role, 'beta testers', 'beta-tester'))\r\n\r\n #----------------------------------------\r\n # forum administration\r\n\r\n elif action == 'List course forum admins':\r\n rolename = FORUM_ROLE_ADMINISTRATOR\r\n datatable = {}\r\n msg += _list_course_forum_members(course_key, rolename, datatable)\r\n track.views.server_track(\r\n request, \"list-forum-admins\", {\"course\": 
course_key.to_deprecated_string()}, page=\"idashboard\"\r\n )\r\n\r\n elif action == 'Remove forum admin':\r\n uname = request.POST['forumadmin']\r\n msg += _update_forum_role_membership(uname, course, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_REMOVE)\r\n track.views.server_track(\r\n request, \"remove-forum-admin\", {\"username\": uname, \"course\": course_key.to_deprecated_string()},\r\n page=\"idashboard\"\r\n )\r\n\r\n elif action == 'Add forum admin':\r\n uname = request.POST['forumadmin']\r\n msg += _update_forum_role_membership(uname, course, FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_ADD)\r\n track.views.server_track(\r\n request, \"add-forum-admin\", {\"username\": uname, \"course\": course_key.to_deprecated_string()},\r\n page=\"idashboard\"\r\n )\r\n\r\n elif action == 'List course forum moderators':\r\n rolename = FORUM_ROLE_MODERATOR\r\n datatable = {}\r\n msg += _list_course_forum_members(course_key, rolename, datatable)\r\n track.views.server_track(\r\n request, \"list-forum-mods\", {\"course\": course_key.to_deprecated_string()}, page=\"idashboard\"\r\n )\r\n\r\n elif action == 'Remove forum moderator':\r\n uname = request.POST['forummoderator']\r\n msg += _update_forum_role_membership(uname, course, FORUM_ROLE_MODERATOR, FORUM_ROLE_REMOVE)\r\n track.views.server_track(\r\n request, \"remove-forum-mod\", {\"username\": uname, \"course\": course_key.to_deprecated_string()},\r\n page=\"idashboard\"\r\n )\r\n\r\n elif action == 'Add forum moderator':\r\n uname = request.POST['forummoderator']\r\n msg += _update_forum_role_membership(uname, course, FORUM_ROLE_MODERATOR, FORUM_ROLE_ADD)\r\n track.views.server_track(\r\n request, \"add-forum-mod\", {\"username\": uname, \"course\": course_key.to_deprecated_string()},\r\n page=\"idashboard\"\r\n )\r\n\r\n elif action == 'List course forum community TAs':\r\n rolename = FORUM_ROLE_COMMUNITY_TA\r\n datatable = {}\r\n msg += _list_course_forum_members(course_key, rolename, datatable)\r\n track.views.server_track(\r\n request, \"list-forum-community-TAs\", {\"course\": course_key.to_deprecated_string()},\r\n page=\"idashboard\"\r\n )\r\n\r\n elif action == 'Remove forum community TA':\r\n uname = request.POST['forummoderator']\r\n msg += _update_forum_role_membership(uname, course, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_REMOVE)\r\n track.views.server_track(\r\n request, \"remove-forum-community-TA\", {\r\n \"username\": uname, \"course\": course_key.to_deprecated_string()\r\n },\r\n page=\"idashboard\"\r\n )\r\n\r\n elif action == 'Add forum community TA':\r\n uname = request.POST['forummoderator']\r\n msg += _update_forum_role_membership(uname, course, FORUM_ROLE_COMMUNITY_TA, FORUM_ROLE_ADD)\r\n track.views.server_track(\r\n request, \"add-forum-community-TA\", {\r\n \"username\": uname, \"course\": course_key.to_deprecated_string()\r\n },\r\n page=\"idashboard\"\r\n )\r\n\r\n #----------------------------------------\r\n # enrollment\r\n\r\n elif action == 'List students who may enroll but may not have yet signed up':\r\n ceaset = CourseEnrollmentAllowed.objects.filter(course_id=course_key)\r\n datatable = {'header': ['StudentEmail']}\r\n datatable['data'] = [[x.email] for x in ceaset]\r\n datatable['title'] = action\r\n\r\n elif action == 'Enroll multiple students':\r\n\r\n is_shib_course = uses_shib(course)\r\n students = request.POST.get('multiple_students', '')\r\n auto_enroll = bool(request.POST.get('auto_enroll'))\r\n email_students = bool(request.POST.get('email_students'))\r\n ret = _do_enroll_students(course, course_key, students, 
auto_enroll=auto_enroll, email_students=email_students, is_shib_course=is_shib_course)\r\n datatable = ret['datatable']\r\n\r\n elif action == 'Unenroll multiple students':\r\n\r\n students = request.POST.get('multiple_students', '')\r\n email_students = bool(request.POST.get('email_students'))\r\n ret = _do_unenroll_students(course_key, students, email_students=email_students)\r\n datatable = ret['datatable']\r\n\r\n elif action == 'List sections available in remote gradebook':\r\n\r\n msg2, datatable = _do_remote_gradebook(request.user, course, 'get-sections')\r\n msg += msg2\r\n\r\n elif action in ['List students in section in remote gradebook',\r\n 'Overload enrollment list using remote gradebook',\r\n 'Merge enrollment list with remote gradebook']:\r\n\r\n section = request.POST.get('gradebook_section', '')\r\n msg2, datatable = _do_remote_gradebook(request.user, course, 'get-membership', dict(section=section))\r\n msg += msg2\r\n\r\n if not 'List' in action:\r\n students = ','.join([x['email'] for x in datatable['retdata']])\r\n overload = 'Overload' in action\r\n ret = _do_enroll_students(course, course_key, students, overload=overload)\r\n datatable = ret['datatable']\r\n\r\n #----------------------------------------\r\n # email\r\n\r\n elif action == 'Send email':\r\n email_to_option = request.POST.get(\"to_option\")\r\n email_subject = request.POST.get(\"subject\")\r\n html_message = request.POST.get(\"message\")\r\n\r\n if bulk_email_is_enabled_for_course(course_key):\r\n try:\r\n # Create the CourseEmail object. This is saved immediately, so that\r\n # any transaction that has been pending up to this point will also be\r\n # committed.\r\n email = CourseEmail.create(\r\n course_key.to_deprecated_string(), request.user, email_to_option, email_subject, html_message\r\n )\r\n\r\n # Submit the task, so that the correct InstructorTask object gets created (for monitoring purposes)\r\n submit_bulk_course_email(request, course_key, email.id) # pylint: disable=E1101\r\n\r\n except Exception as err: # pylint: disable=broad-except\r\n # Catch any errors and deliver a message to the user\r\n error_msg = \"Failed to send email! ({0})\".format(err)\r\n msg += \"<font color='red'>\" + error_msg + \"</font>\"\r\n log.exception(error_msg)\r\n\r\n else:\r\n # If sending the task succeeds, deliver a success message to the user.\r\n if email_to_option == \"all\":\r\n text = _(\r\n \"Your email was successfully queued for sending. 
\"\r\n \"Please note that for large classes, it may take up to an hour \"\r\n \"(or more, if other courses are simultaneously sending email) \"\r\n \"to send all emails.\"\r\n )\r\n else:\r\n text = _('Your email was successfully queued for sending.')\r\n email_msg = '<div class=\"msg msg-confirm\"><p class=\"copy\">{text}</p></div>'.format(text=text)\r\n else:\r\n msg += \"<font color='red'>Email is not enabled for this course.</font>\"\r\n\r\n elif \"Show Background Email Task History\" in action:\r\n message, datatable = get_background_task_table(course_key, task_type='bulk_course_email')\r\n msg += message\r\n\r\n elif \"Show Background Email Task History\" in action:\r\n message, datatable = get_background_task_table(course_key, task_type='bulk_course_email')\r\n msg += message\r\n\r\n #----------------------------------------\r\n # psychometrics\r\n\r\n elif action == 'Generate Histogram and IRT Plot':\r\n problem = request.POST['Problem']\r\n nmsg, plots = psychoanalyze.generate_plots_for_problem(problem)\r\n msg += nmsg\r\n track.views.server_track(request, \"psychometrics-histogram-generation\", {\"problem\": unicode(problem)}, page=\"idashboard\")\r\n\r\n if idash_mode == 'Psychometrics':\r\n problems = psychoanalyze.problems_with_psychometric_data(course_key)\r\n\r\n #----------------------------------------\r\n # analytics\r\n def get_analytics_result(analytics_name):\r\n \"\"\"Return data for an Analytic piece, or None if it doesn't exist. It\r\n logs and swallows errors.\r\n \"\"\"\r\n url = settings.ANALYTICS_SERVER_URL + \\\r\n u\"get?aname={}&course_id={}&apikey={}\".format(\r\n analytics_name, course_key.to_deprecated_string(), settings.ANALYTICS_API_KEY\r\n )\r\n try:\r\n res = requests.get(url)\r\n except Exception: # pylint: disable=broad-except\r\n log.exception(\"Error trying to access analytics at %s\", url)\r\n return None\r\n\r\n if res.status_code == codes.OK:\r\n # WARNING: do not use req.json because the preloaded json doesn't\r\n # preserve the order of the original record (hence OrderedDict).\r\n return json.loads(res.content, object_pairs_hook=OrderedDict)\r\n else:\r\n log.error(\"Error fetching %s, code: %s, msg: %s\",\r\n url, res.status_code, res.content)\r\n return None\r\n\r\n analytics_results = {}\r\n\r\n if idash_mode == 'Analytics':\r\n DASHBOARD_ANALYTICS = [\r\n # \"StudentsAttemptedProblems\", # num students who tried given problem\r\n \"StudentsDailyActivity\", # active students by day\r\n \"StudentsDropoffPerDay\", # active students dropoff by day\r\n # \"OverallGradeDistribution\", # overall point distribution for course\r\n \"StudentsActive\", # num students active in time period (default = 1wk)\r\n \"StudentsEnrolled\", # num students enrolled\r\n # \"StudentsPerProblemCorrect\", # foreach problem, num students correct\r\n \"ProblemGradeDistribution\", # foreach problem, grade distribution\r\n ]\r\n for analytic_name in DASHBOARD_ANALYTICS:\r\n analytics_results[analytic_name] = get_analytics_result(analytic_name)\r\n\r\n #----------------------------------------\r\n # Metrics\r\n\r\n metrics_results = {}\r\n if settings.FEATURES.get('CLASS_DASHBOARD') and idash_mode == 'Metrics':\r\n metrics_results['section_display_name'] = dashboard_data.get_section_display_name(course_key)\r\n metrics_results['section_has_problem'] = dashboard_data.get_array_section_has_problem(course_key)\r\n\r\n #----------------------------------------\r\n # offline grades?\r\n\r\n if use_offline:\r\n msg += \"<br/><font color='orange'>{text}</font>\".format(\r\n 
text=_(\"Grades from {course_id}\").format(\r\n course_id=offline_grades_available(course_key)\r\n )\r\n )\r\n\r\n # generate list of pending background tasks\r\n if settings.FEATURES.get('ENABLE_INSTRUCTOR_BACKGROUND_TASKS'):\r\n instructor_tasks = get_running_instructor_tasks(course_key)\r\n else:\r\n instructor_tasks = None\r\n\r\n # determine if this is a studio-backed course so we can provide a link to edit this course in studio\r\n is_studio_course = modulestore().get_modulestore_type(course_key) != XML_MODULESTORE_TYPE\r\n studio_url = None\r\n if is_studio_course:\r\n studio_url = get_cms_course_link(course)\r\n\r\n email_editor = None\r\n # HTML editor for email\r\n if idash_mode == 'Email' and is_studio_course:\r\n html_module = HtmlDescriptor(\r\n course.system,\r\n DictFieldData({'data': html_message}),\r\n ScopeIds(None, None, None, course_key.make_usage_key('html', 'dummy'))\r\n )\r\n fragment = html_module.render('studio_view')\r\n fragment = wrap_xblock(\r\n 'LmsRuntime', html_module, 'studio_view', fragment, None,\r\n extra_data={\"course-id\": course_key.to_deprecated_string()},\r\n usage_id_serializer=lambda usage_id: quote_slashes(usage_id.to_deprecated_string())\r\n )\r\n email_editor = fragment.content\r\n\r\n # Enable instructor email only if the following conditions are met:\r\n # 1. Feature flag is on\r\n # 2. We have explicitly enabled email for the given course via django-admin\r\n # 3. It is NOT an XML course\r\n if bulk_email_is_enabled_for_course(course_key):\r\n show_email_tab = True\r\n\r\n # display course stats only if there is no other table to display:\r\n course_stats = None\r\n if not datatable:\r\n course_stats = get_course_stats_table()\r\n\r\n # disable buttons for large courses\r\n disable_buttons = False\r\n max_enrollment_for_buttons = settings.FEATURES.get(\"MAX_ENROLLMENT_INSTR_BUTTONS\")\r\n if max_enrollment_for_buttons is not None:\r\n disable_buttons = enrollment_number > max_enrollment_for_buttons\r\n\r\n #----------------------------------------\r\n # context for rendering\r\n\r\n context = {\r\n 'course': course,\r\n 'staff_access': True,\r\n 'admin_access': request.user.is_staff,\r\n 'instructor_access': instructor_access,\r\n 'forum_admin_access': forum_admin_access,\r\n 'datatable': datatable,\r\n 'course_stats': course_stats,\r\n 'msg': msg,\r\n 'modeflag': {idash_mode: 'selectedmode'},\r\n 'studio_url': studio_url,\r\n\r\n 'to_option': email_to_option, # email\r\n 'subject': email_subject, # email\r\n 'editor': email_editor, # email\r\n 'email_msg': email_msg, # email\r\n 'show_email_tab': show_email_tab, # email\r\n\r\n 'problems': problems, # psychometrics\r\n 'plots': plots, # psychometrics\r\n 'course_errors': modulestore().get_course_errors(course.id),\r\n 'instructor_tasks': instructor_tasks,\r\n 'offline_grade_log': offline_grades_available(course_key),\r\n 'cohorts_ajax_url': reverse('cohorts', kwargs={'course_key': course_key.to_deprecated_string()}),\r\n\r\n 'analytics_results': analytics_results,\r\n 'disable_buttons': disable_buttons,\r\n 'metrics_results': metrics_results,\r\n }\r\n\r\n context['standard_dashboard_url'] = reverse('instructor_dashboard', kwargs={'course_id': course_key.to_deprecated_string()})\r\n\r\n return render_to_response('courseware/instructor_dashboard.html', context)", "def dashboard(request):\r\n if not request.user.is_staff:\r\n raise Http404\r\n\r\n # results are passed to the template. The template knows how to render\r\n # two types of results: scalars and tables. 
Scalars should be represented\r\n # as \"Visible Title\": Value and tables should be lists of lists where each\r\n # inner list represents a single row of the table\r\n results = {\"scalars\":{},\"tables\":{}}\r\n\r\n # count how many users we have\r\n results[\"scalars\"][\"Unique Usernames\"]=User.objects.filter().count()\r\n results[\"scalars\"][\"Activated Usernames\"]=User.objects.filter(is_active=1).count()\r\n\r\n # count how many enrollments we have\r\n results[\"scalars\"][\"Total Enrollments Across All Courses\"] = CourseEnrollment.objects.filter(is_active=1).count()\r\n\r\n # establish a direct connection to the database (for executing raw SQL)\r\n cursor = connection.cursor()\r\n\r\n # define the queries that will generate our user-facing tables\r\n # table queries need not take the form of raw SQL, but do in this case since\r\n # the MySQL backend for django isn't very friendly with group by or distinct\r\n table_queries = {}\r\n table_queries[\"course registrations (current enrollments)\"] = \"\"\"\r\n select\r\n course_id as Course,\r\n count(user_id) as Students\r\n from student_courseenrollment\r\n where is_active=1\r\n group by course_id\r\n order by students desc;\"\"\"\r\n table_queries[\"number of students in each number of classes\"] = \"\"\"\r\n select registrations as 'Registered for __ Classes' ,\r\n count(registrations) as Users\r\n from (select count(user_id) as registrations\r\n from student_courseenrollment\r\n where is_active=1\r\n group by user_id) as registrations_per_user\r\n group by registrations;\"\"\"\r\n\r\n # add the result for each of the table_queries to the results object\r\n for query in table_queries.keys():\r\n cursor.execute(table_queries[query])\r\n results[\"tables\"][query] = SQL_query_to_list(cursor, table_queries[query])\r\n\r\n context={\"results\":results}\r\n\r\n return render_to_response(\"admin_dashboard.html\",context)" ]
[ "0.6137047", "0.59800327", "0.5926279", "0.59021765", "0.5885904", "0.58642113", "0.58123016", "0.5776785", "0.5736159", "0.5716246", "0.5671638", "0.5669275", "0.5610859", "0.5595668", "0.55178446", "0.5484243", "0.54227275", "0.5420696", "0.5375232", "0.53611064", "0.5234084", "0.5226898", "0.5208686", "0.52017325", "0.51998556", "0.5198017", "0.5184696", "0.5181673", "0.5150607", "0.5143832" ]
0.7423683
0
Set up coefficients of an assignmentype (id=pk)
def coeff_assignmentype(request, pk):
    prof = request.user.prof
    context = {'prof': prof}
    assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()
    if assignmentype:
        nb_questions = assignmentype.nb_questions
        if request.method == 'POST':
            form = CoeffForm(request.POST, nb_questions=nb_questions)
            if form.is_valid():
                assignmentype.questions_coeff = [form.cleaned_data['coeff_%s' % i]
                                                 for i in range(1, assignmentype.nb_questions + 1)]
                assignmentype.save()
                # Compute all grades
                log = tasks.compute_grades_assignmentype(assignmentype.id)
                logger.error(log)
                return redirect('/detail_assignmentype/%s/' % pk)
        else:
            questions_coeff = assignmentype.questions_coeff
            coeff = {}
            if questions_coeff:
                for i in range(1, nb_questions + 1):
                    coeff['coeff_%s' % i] = assignmentype.questions_coeff[i - 1]
            else:
                coeff = dict.fromkeys(['coeff_%s' % i
                                       for i in range(1, nb_questions + 1)], None)
            form = CoeffForm(nb_questions=nb_questions, initial=coeff)
        context['form'] = form
        context['assignmentype'] = assignmentype
        return render(request, 'gradapp/coeff_assignmentype.html', context)
    return redirect('gradapp:list_assignmentypes_running')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, coefficients):\n self.coefficients = coefficients", "def CreateCoefficientPolyfitTables(self):\n for currentPr in self.polars:\n for currentPolar in currentPr[1]:\n # Combine (Pr, AOA) as a point\n self.points.append([currentPr[0], currentPolar[0]])\n\n # Corresponding to CL, CD and Cm value\n self.valuesCL.append(currentPolar[1])\n self.valuesCD.append(currentPolar[2])\n self.valuesCm.append(currentPolar[3])\n\n for Pr in self.Prs:\n self.AOAs_Pr.append([point[1] for point in self.points if point[0] == Pr])\n self.CLs_Pr.append([self.valuesCL[i] for i in range(len(self.points)) if self.points[i][0] == Pr])\n self.CDs_Pr.append([self.valuesCD[i] for i in range(len(self.points)) if self.points[i][0] == Pr])\n self.Cms_Pr.append([self.valuesCm[i] for i in range(len(self.points)) if self.points[i][0] == Pr])\n\n # Fit coefficients to AOAs\n if self.weightedFit:\n w = np.array([np.cos((a - 2.5) / 5 * np.pi / 4) for a in self.AOAs_Pr[-1]])\n # Fit coefficients with weight\n self.CLfit_Pr.append(np.polyfit(self.AOAs_Pr[-1], self.CLs_Pr[-1],\n self.CLfitOrder, w=w))\n self.CDfit_Pr.append(np.polyfit(self.AOAs_Pr[-1], self.CDs_Pr[-1],\n self.CDfitOrder, w=w))\n self.Cmfit_Pr.append(np.polyfit(self.AOAs_Pr[-1], self.Cms_Pr[-1],\n self.CmfitOrder, w=w))\n\n else:\n self.CLfit_Pr.append(np.polyfit(self.AOAs_Pr[-1], self.CLs_Pr[-1], self.CLfitOrder))\n self.CDfit_Pr.append(np.polyfit(self.AOAs_Pr[-1], self.CDs_Pr[-1], self.CDfitOrder))\n self.Cmfit_Pr.append(np.polyfit(self.AOAs_Pr[-1], self.Cms_Pr[-1], self.CmfitOrder))", "def CreateCoefficientPolyfitTables(self):\n for currentRe in self.polars:\n for currentPolar in currentRe[1]:\n # Combine (Re, AOA) as a point\n self.points.append([currentRe[0], currentPolar[0]])\n\n # Corresponding to CL, CDp and Cm value\n self.valuesCL.append(currentPolar[1])\n self.valuesCDp.append(currentPolar[2])\n self.valuesCm.append(currentPolar[3])\n\n for Re in self.Res:\n self.AOAs_Re.append([point[1] for point in self.points if point[0] == Re])\n self.CLs_Re.append([self.valuesCL[i] for i in range(len(self.points)) if self.points[i][0] == Re])\n self.CDps_Re.append([self.valuesCDp[i] for i in range(len(self.points)) if self.points[i][0] == Re])\n self.Cms_Re.append([self.valuesCm[i] for i in range(len(self.points)) if self.points[i][0] == Re])\n\n # Fit coefficients to AOAs\n if self.weightedFit:\n w = np.array([np.cos((a - 2.5) / 5 * np.pi / 4) for a in self.AOAs_Re[-1]])\n # Fit coefficients with weight\n self.CLfit_Re.append(np.polyfit(self.AOAs_Re[-1], self.CLs_Re[-1],\n self.CLfitOrder, w=w))\n self.CDpfit_Re.append(np.polyfit(self.AOAs_Re[-1], self.CDps_Re[-1],\n self.CDpfitOrder, w=w))\n self.Cmfit_Re.append(np.polyfit(self.AOAs_Re[-1], self.Cms_Re[-1],\n self.CmfitOrder, w=w))\n\n else:\n self.CLfit_Re.append(np.polyfit(self.AOAs_Re[-1], self.CLs_Re[-1], self.CLfitOrder))\n self.CDpfit_Re.append(np.polyfit(self.AOAs_Re[-1], self.CDps_Re[-1], self.CDpfitOrder))\n self.Cmfit_Re.append(np.polyfit(self.AOAs_Re[-1], self.Cms_Re[-1], self.CmfitOrder))", "def update_coeff(self, **kwargs: float) -> None:\n for rule_name, coeff in kwargs.items():\n if rule_name not in self.rules:\n raise ValueError(f\"Behavioral rule {rule_name} does not exist\")\n else:\n self.rules[getattr(self, rule_name)] = coeff", "def coefficients(self) :\n raise NotImplementedError", "def __init__(self, name, coeff=1, metric=None):\n self.name = name\n self.coeff = coeff # To be removed\n self.metric = None", "def updateCoeff(self, **args):\n for par in args:\n 
self.rateCoeffMeta[par] = args[par]\n meta = self.rateCoeffMeta\n if self.rateCoeffMeta['type'] ==\"constant\":\n self.k = cp.k_const(meta['k'])\n elif self.rateCoeffMeta['type'] ==\"Arrhenius\":\n self.k = cp.k_arr(meta['A'], meta['E'], meta['T'], meta['R'])\n elif self.rateCoeffMeta['type'] ==\"modifiedArrhenius\":\n self.k = cp.k_mod_arr(meta['A'], meta['b'], meta['E'], meta['T'], meta['R'])\n else:\n # Other type of reaction rate coeff\n self.k = None # k = cp.newMethodToComputeK(...)\n return", "def __init__(self, coeff, expt):\n # assign a attribute to the object.\n \n self.coeff = coeff\n \n if self.coeff.is_nan() :\n self.expt = 0\n elif self.is_zero() == False :\n self.expt = int(expt)\n else :\n self.expt = 0", "def set_coefficients(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n\r\n # select file names according to airfoil section:\r\n if cls.SEC == 1:\r\n clfile, cdfile = 'naca0012cl.csv', 'naca0012cd.csv'\r\n elif cls.SEC == 2:\r\n clfile, cdfile = 'naca0015cl.csv', 'naca0015cd.csv'\r\n elif cls.SEC == 3:\r\n clfile, cdfile = 'naca0018cl.csv', 'naca0018cd.csv'\r\n elif cls.SEC == 4:\r\n clfile, cdfile = 'naca0021cl.csv', 'naca0021cd.csv'\r\n elif cls.SEC == 5:\r\n clfile, cdfile = 'du06w200cl.csv', 'du06w200cd.csv'\r\n else:\r\n raise Exception('Input error: invalid airfoil section number!')\r\n \r\n # load arrays of coefficients:\r\n CL = np.loadtxt(clfile, delimiter=',')\r\n CD = np.loadtxt(cdfile, delimiter=',')\r\n\r\n # angle of attack and reynolds tables:\r\n if cls.SEC != 5:\r\n AA = np.loadtxt('nacaaa.csv', unpack=True)\r\n RE = np.loadtxt('nacare.csv', unpack=True)\r\n else:\r\n AA = np.loadtxt('du06w200aa.csv', unpack=True)\r\n RE = np.loadtxt('du06w200re.csv', unpack=True) \r\n \r\n # create functions for lift and drag coefficients:\r\n fCL = interp2d(RE, AA, CL, kind='cubic')\r\n fCD = interp2d(RE, AA, CD, kind='cubic')\r\n \r\n # vectorize lift and drag functions:\r\n cls.v_fCL, cls.v_fCD = np.vectorize(fCL), np.vectorize(fCD)", "def __init__ (self, equ_type='none' , extra='none'):\n self.equ_type = self.set_equation_type(equ_type)\n self.coeffs = []\n self.extra = str(extra)", "def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)", "def setParams(self, p = 2):\n self.p = p\n self.l = p - 1\n self.id_ntot = {}\n self.id_y = {}\n self.id_W = {}\n self.id_X = {}\n for i in self.uniids:\n tracker = (self.data['id'] == i)\n self.id_ntot.update({i: np.sum(tracker)})\n self.id_y.update({i:\n self.data['weight'][tracker].reshape(np.sum(tracker), 1)})\n self.id_W.update({i: self._designMatrix_(p, tracker)})\n self.id_X.update({i:\n self._designMatrix_(self.l+1,tracker,is_X=True)})\n self.id_Z = self.id_W.copy()", "def __init__(self, polyhedron, data):\n super(Equation, self).__init__(polyhedron, data)", "def test_polynomial_init_with_constraints(model_class):\n\n # Just determine which parameter to place a constraint on; it doesn't\n # matter which parameter it is to exhibit the problem so long as it's a\n # valid parameter for the model\n if \"1D\" in model_class.__name__:\n param = \"c0\"\n else:\n param = \"c0_0\"\n\n if issubclass(model_class, Linear1D):\n param = \"intercept\"\n\n if issubclass(model_class, OrthoPolynomialBase):\n degree = (2, 2)\n else:\n degree = (2,)\n\n 
m = model_class(*degree, fixed={param: True})\n\n assert m.fixed[param] is True\n assert getattr(m, param).fixed is True\n\n if issubclass(model_class, OrthoPolynomialBase):\n assert (\n repr(m)\n == f\"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., \"\n \"c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>\"\n )\n assert (\n str(m) == f\"Model: {model_class.__name__}\\n\"\n \"Inputs: ('x', 'y')\\n\"\n \"Outputs: ('z',)\\n\"\n \"Model set size: 1\\n\"\n \"X_Degree: 2\\n\"\n \"Y_Degree: 2\\n\"\n \"Parameters:\\n\"\n \" c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\\n\"\n \" ---- ---- ---- ---- ---- ---- ---- ---- ----\\n\"\n \" 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0\"\n )\n else:\n if model_class.__name__ == \"Polynomial2D\":\n assert (\n repr(m) == \"<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., \"\n \"c0_1=0., c0_2=0., c1_1=0.)>\"\n )\n assert (\n str(m) == \"Model: Polynomial2D\\n\"\n \"Inputs: ('x', 'y')\\n\"\n \"Outputs: ('z',)\\n\"\n \"Model set size: 1\\n\"\n \"Degree: 2\\n\"\n \"Parameters:\\n\"\n \" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\\n\"\n \" ---- ---- ---- ---- ---- ----\\n\"\n \" 0.0 0.0 0.0 0.0 0.0 0.0\"\n )\n elif model_class.__name__ == \"Linear1D\":\n assert repr(m) == \"<Linear1D(slope=2., intercept=0.)>\"\n assert (\n str(m) == \"Model: Linear1D\\n\"\n \"Inputs: ('x',)\\n\"\n \"Outputs: ('y',)\\n\"\n \"Model set size: 1\\n\"\n \"Parameters:\\n\"\n \" slope intercept\\n\"\n \" ----- ---------\\n\"\n \" 2.0 0.0\"\n )\n else:\n assert repr(m) == f\"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>\"\n assert (\n str(m) == f\"Model: {model_class.__name__}\\n\"\n \"Inputs: ('x',)\\n\"\n \"Outputs: ('y',)\\n\"\n \"Model set size: 1\\n\"\n \"Degree: 2\\n\"\n \"Parameters:\\n\"\n \" c0 c1 c2\\n\"\n \" --- --- ---\\n\"\n \" 0.0 0.0 0.0\"\n )", "def _screener_init(self):\n subject = 0\n self.model.uu.remove(subject) # selects from untested and performs e\n self.model.tt.append(subject)\n self.model.b -= (self.cz + self.cy) # update budget\n self.model.z[subject] = self.z[subject] # update model\n self.model.y[subject] = self.y[subject]", "def __init__(self, model):\n if model == \"biblis\" :\n data = [1.3200e+00, 2.7720e-01, 2.6562e-03, \\\n 7.1596e-02, 0.00000000, 0.00000000, \\\n 2.3106e-02] \n else :\n raise Exception(\"Reflector model not available\")\n self.model = model\n # Default group data.\n self.D1 = data[0]\n self.D2 = data[1]\n self.A1 = data[2]\n self.A2 = data[3] \n self.F1 = data[4]\n self.F2 = data[5] \n self.S12 = data[6] \n self.R1 = self.A1 + self.S12", "def set_coefficients(self, a_n, physical_space=False, lobatto=True,\n use_mp=False, dps=None):\n self._check_a_n(a_n)\n if physical_space:\n self.a_n = self._from_physical_space(a_n, lobatto=lobatto,\n use_mp=use_mp, dps=dps)\n else:\n self.a_n = a_n", "def _bld_p_m1(self, name, coeff_funcs, solver, z_coeff_key='z', acodes=None, sense=opt.SENSE_MAXIMIZE, mask=None):\n p = opt.Problem(name, sense=sense, solver=solver)\n p.coeff_funcs = coeff_funcs\n p.formulation = 1\n self._problems[name] = p\n p.trees, p._vars = self._gen_vars_m1(coeff_funcs, acodes=acodes, mask=mask)\n for i, tree in list(p.trees.items()):\n cname = 'cov_%i' % hash(i)\n coeffs = {'x_%i' % hash((i, tuple(n.data('acode') for n in path))):1. 
for path in tree.paths()}\n p.add_constraint(name=cname, coeffs=coeffs, sense=opt.SENSE_EQ, rhs=1.)\n for path in tree.paths():\n try:\n p._z['x_%i' % hash((i, tuple(n.data('acode') for n in path)))] = path[-1].data(z_coeff_key)\n except Exception as e:\n print('error processing tree', i)\n print(e)\n import pdb\n pdb.set_trace()\n assert False\n return p", "def coefficients(self, force_characters = False) :\n raise NotImplementedError", "def __init__(self,x,y, alpha = 0):\n self.x = x\n self.y = y\n self.alpha = alpha\n if len(x) != len(y): raise LRDataException(\"Lengths of input and response don't match\") \n if len(x) == 0: raise LRDataException(\"Data set is empty\")\n # Precalculate {y_i*x_ij} for all j\n self.xy = x*y[:,None]", "def __init__(self, coefficients, name=None):\n coefficients = {v: str(c) for v,c in coefficients.iteritems()}\n expr = ' + '.join('%s*%s' % (c,v) for v,c in coefficients.iteritems())\n Function.__init__(self, expr, variables=set(coefficients),\n first_derivatives=coefficients, name=name)\n self.coefficients = coefficients", "def __init__(self, data_table, answers):\n\t\tBasicRegression.__init__(self,data_table,answers)\n\t\tself.add_intercept()", "def __init__(self, obj):\n if type(obj) is Monomial:\n Polynomial.__init__(self, obj)\n else:\n Polynomial.__init__(self, *obj.monomials)", "def __init__(self, model):\n self.model = model\n self.n = 0\n\n self.ctetra4 = model.ctetra4\n self.cpyram5 = model.cpyram5\n self.cpenta6 = model.cpenta6\n self.chexa8 = model.chexa8\n\n self.ctetra10 = model.ctetra10\n self.cpyram13 = model.cpyram13\n self.cpenta15 = model.cpenta15\n self.chexa20 = model.chexa20\n self.element_id = array([], dtype='int32')", "def __init__(self, poly, ambient=None):\n if not is_MPolynomial(poly):\n raise TypeError(\"Defining polynomial (= %s) must be a multivariate polynomial\"%poly)\n if ambient is None:\n R = poly.parent()\n from sage.schemes.affine.affine_space import AffineSpace\n ambient = AffineSpace(R.base_ring(), R.ngens())\n ambient._coordinate_ring = R\n AlgebraicScheme_subscheme_affine.__init__(self, ambient, [poly])", "def __init__(self, df_rating, test_ratio=None, df_id_name_table=None, rating_scale=(1, 5)): \n reader = surp.Reader(rating_scale=rating_scale)\n rating_data = surp.Dataset.load_from_df(df_rating, reader)\n self.trainset = rating_data.build_full_trainset()\n \n if test_ratio is not None:\n self.trainset, self.testset = surp.model_selection.train_test_split(data=rating_data, test_size=test_ratio)\n else:\n self.trainset = rating_data.build_full_trainset()\n\n # self.__dict_id_to_name: id_1: [name1_1, name1_2...], id_2: [name2_1, name2_2....]\n # self.__dict_name_to_id: name1: [id1_1, id1_2...], name2: [id2_1, id2_2...]\n if df_id_name_table is not None:\n self.__dict_id_to_name = df_id_name_table.groupby('itemID')['itemName'].apply(lambda x: x.tolist()).to_dict()\n self.__dict_name_to_id = df_id_name_table.groupby('itemName')['itemID'].apply(lambda x: x.tolist()).to_dict()", "def init(tipo: int, factor:float):\n if tipo == 1:\n tipo = \"CHAINING\"\n else:\n tipo = \"PROBING\"\n # catalog es utilizado para interactuar con el modelo\n catalogo = model.NewCatalog(tipo,factor)\n return catalogo", "def amult(self,coef):\n new_name = self.getName() + '_' + str(coef)\n newDataTable = DataTable(new_name)\n newDataTable.setColumnUnits(self.getColumnUnits())\n col_names= self.getColumnNames()\n # Copy the first column as it\n newDataTable.addColumn(col_names[0],self.getColumn(0))\n for i in range(1,len(col_names)):\n c = 
self.getColumn(i)*coef\n newDataTable.addColumn(col_names[i],c)\n pass\n return newDataTable", "def set_permmodel(dat, zonelist, index, permmodel_dict):\n perm_mod = fdata.fmodel('permmodel', index=index,\n zonelist=zonelist)\n # Set required permeability\n for key, value in permmodel_dict.iteritems():\n perm_mod.param[key] = value\n dat.add(perm_mod)\n return dat", "def __init__(self, data_model: DataModel) -> None:\n\n # DataModel\n self._data_model = data_model\n\n # Dict[ParameterName, Any]\n self._param_to_value: Dict[ParameterName, Any] = {}\n\n # Dict[ParameterName, Dict[ParameterName, Any]]\n self._numbered_objects: Dict[ParameterName, Dict[ParameterName, Any]] = {}\n # If adding a PLMN object, then you would set something like\n # self._numbered_objects['PLMN_1'] = {'PLMN_1_ENABLED': True}" ]
[ "0.6066042", "0.60133684", "0.5861116", "0.5589921", "0.5528351", "0.5413231", "0.5328059", "0.52095425", "0.5183686", "0.5170293", "0.5167636", "0.5162758", "0.51383466", "0.5130969", "0.5099933", "0.5098828", "0.5098256", "0.5043545", "0.5043207", "0.50340074", "0.4991573", "0.49904713", "0.4967682", "0.49663487", "0.4955762", "0.495097", "0.49447364", "0.4942942", "0.4940075", "0.49349612" ]
0.6348616
0
csv to html conversion.
def csv_to_html():
    logging.info("Converting csv to html..")
    df = pd.read_csv(gTAF_config.execution_summary_csv_file)
    df.to_html(gTAF_config.html_report_file)
    htmTable = df.to_html()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_to_html(filepath):\r\n df = pd.read_csv(filepath, index_col=0)\r\n html = df.to_html()\r\n return html", "def csv_to_html_table_starter( csvdata ):\n # probably should use the readcsv function, above!\n html_string = '<table>\\n' # start with the table tag\n\n for element in csvdata:\n # print(\"The element is:\", element)\n # print(\"The first part of the element is:\", element[0])\n html_string += '<tr>\\n<td>' + element[0] + '</td>\\n<td>' + str(element[1]) + '</td>\\n</tr>\\n'\n # html_string += str(element) + \"\\n\" # \"place your table rows and data here!\\n\" # from list_of_rows !\n\n html_string += '</table>\\n'\n return html_string", "def csvdata():\n return render_template(\"data.html\")", "def convert_html():\n return", "def convert_csv_to_pretty_txt(csv_file_name, txt_file_name, delimiter=u\",\"):\n\n txt_table = None\n with open(csv_file_name, u\"rt\", encoding='utf-8') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=delimiter, quotechar=u'\"')\n for row in csv_content:\n if txt_table is None:\n txt_table = prettytable.PrettyTable(row)\n else:\n txt_table.add_row(\n [str(itm.replace(u\"\\u00B1\", u\"+-\")) for itm in row]\n )\n if not txt_table:\n return\n\n txt_table.align = u\"r\"\n for itm in (u\"Test Case\", u\"Build\", u\"Version\", u\"VPP Version\"):\n txt_table.align[itm] = u\"l\"\n\n if txt_file_name.endswith(u\".txt\"):\n with open(txt_file_name, u\"wt\", encoding='utf-8') as txt_file:\n txt_file.write(str(txt_table))\n elif txt_file_name.endswith(u\".rst\"):\n with open(txt_file_name, u\"wt\") as txt_file:\n txt_file.write(\n u\"\\n\"\n u\".. |br| raw:: html\\n\\n <br />\\n\\n\\n\"\n u\".. |prein| raw:: html\\n\\n <pre>\\n\\n\\n\"\n u\".. |preout| raw:: html\\n\\n </pre>\\n\\n\"\n )\n txt_file.write(\n u\"\\n.. only:: html\\n\\n\"\n u\" .. csv-table::\\n\"\n u\" :header-rows: 1\\n\"\n u\" :widths: auto\\n\"\n u\" :align: center\\n\"\n f\" :file: {csv_file_name.split(u'/')[-1]}\\n\"\n )", "def csv_bootstrap_table():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n #filename = secure_filename(file.filename)\n html_text = csv_html_converter(file)\n html = Markup(html_text)\n return render_template('bootstrap_table.html', html=html, html_code=html_text)\n return render_template('form.html')", "def html_to_csv(dir):\n writer = csv.writer( open('lobbyists_new.csv', 'wb'), dialect='excel' )\n writer.writerow( ['year','name','address','address2',\n 'employer','employer_address',\n 'employer_address2'] ) # headers\n for soup in read_files_as_soup(dir):\n # find all tr tags with a certain attribute:\n table = soup.findAll( 'tr' , attrs={ 'bgcolor' : '#FFFFFF' } )\n # generator expression to get contents of all tr tags:\n records = ( record.contents for record in table )\n for record in records:\n # get table fields from the records:\n fields = [ get_innertext(node) for node in record if node != '\\n' ]\n writer.writerow(fields) # write the record's fields as a csv row\n print \"... 
done\"\n return", "def procesarFilaCuerpo(fila):\r\n csv = \"\"\r\n columnas = fila.split(\"</td>\")\r\n for col in columnas:\r\n csv += procesarColumnaCuerpo(col)+\";\"\r\n \r\n csv = csv[:-1] #quitar el śltimo ;\r\n print csv\r\n return csv", "def csvToTex(\n csvpath,\n na_rep=\"--\",\n float_format=_sig_figs,\n pcols=15,\n addmidrules=None,\n replaceTBrules=True,\n replacestats=True,\n):\n\n # read in the data pandas\n data = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])\n\n # open a new file and use pandas to dump the latex and close out\n # with open(texpath, 'w') as texfile:\n latex = data.to_latex(float_format=float_format, na_rep=na_rep, index=False)\n\n if pcols > 0:\n lines = []\n header, rest_of_file = latex.split(\"\\n\", maxsplit=1)\n\n # createa a bew header\n header_sections = header.split(\"{\")\n old_col_def = header_sections[-1][:-1]\n new_col_def = \"\"\n for n in range(len(old_col_def)):\n if n == 0:\n new_col_def = new_col_def + \"l\"\n new_col_def = new_col_def + \"x{%smm}\" % pcols\n\n lines.append(header.replace(old_col_def, new_col_def))\n\n if replaceTBrules:\n rest_of_file = rest_of_file.replace(\"\\\\toprule\", \"\\\\midrule\")\n rest_of_file = rest_of_file.replace(\"\\\\bottomrule\", \"\\\\midrule\")\n\n if replacestats:\n rest_of_file = rest_of_file.replace(\"std\", \"Std. Dev.\")\n rest_of_file = rest_of_file.replace(\"50\\\\%\", \"Median\")\n rest_of_file = rest_of_file.replace(\"25\\\\%\", \"25th Percentile\")\n rest_of_file = rest_of_file.replace(\"75\\\\%\", \"75th Percentile\")\n rest_of_file = rest_of_file.replace(\"count\", \"Count\")\n rest_of_file = rest_of_file.replace(\"mean\", \"Mean\")\n rest_of_file = rest_of_file.replace(\"min \", \"Min. \")\n rest_of_file = rest_of_file.replace(\"max\", \"Max.\")\n\n # XXX: omg hack\n rest_of_file = rest_of_file.replace(\"AluMin.um\", \"Aluminum\")\n\n if addmidrules is not None:\n if hasattr(addmidrules, \"append\"):\n for amr in addmidrules:\n rest_of_file = rest_of_file.replace(amr, \"\\\\midrule\\n%s\" % amr)\n else:\n rest_of_file = rest_of_file.replace(amr, \"\\\\midrule\\n%s\" % addmidrules)\n\n lines.append(rest_of_file)\n\n return sanitizeTex(\"\\n\".join(lines))", "def output_html(self, path):\n if path is None:\n return\n import os\n fout = codecs.open(os.path.abspath(path), 'w', encoding='utf-8')\n fout.write('<html><body><table>')\n for data in self.datas:\n fout.write('<tr><td>%s</td><td>%s</td><td>%s</td></tr>' % (data['url'], data['title'], data['summary']))\n self.datas.remove(data)\n fout.write('</table></body></html>')\n fout.close()", "def create_html(self):\n rows = self.check()\n htmlrows = \"\"\n for row in rows:\n data = self._format_row(row)\n htmlrows += data\n \n return self.TEMPLATE.format(content=htmlrows)", "def sqlhtml(input):\n output=sql(html(input))\n return output", "def _text(self, template, **kw):\n ns = dict()\n ns['csv'] = _args_to_csv\n ns['f'] = _Namespace(kw)\n return Template(template).render(**ns)", "def to_html(self, header = False):\n self.header = header\n return \"\".join(self._gen_html())", "def handlehtmlsearch_csv(querystring, keywordstring, searchlimit, searchname, cache, smartconstrain):\n fulltitle = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname + 'Full.csv')\n contitle = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname + 'Condensed.csv')\n\n if wcexists:\n if not os.path.exists(os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname)):\n 
os.makedirs(os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname))\n\n with open(fulltitle, 'wt') as csvFull, open(contitle, 'wt') as csvCon:\n fwriter = csv.writer(csvFull, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n cwriter = csv.writer(csvCon, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\n keywords, mpsearch, wokresults, keyresults = handlehtmlsearch_wok(querystring, keywordstring, searchlimit,\n cache, smartconstrain)\n\n conheader = ['Material', 'Publications', 'Space Group', 'Calculated Band Gap']\n for n in keywords:\n conheader.append(n)\n cwriter.writerow(conheader)\n\n linenum = 0\n\n for i in range(len(wokresults)):\n searchdata = wokresults[i]\n\n if wcexists:\n wc = searchWoKTools.generateabstractwc(searchdata)\n imgpath = os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname,\n searchdata[0]['pretty_formula'] + '.png')\n wc.to_file(imgpath)\n\n fwriter.writerow([searchdata[0]['pretty_formula'],\n str(searchdata[0]['numResults']) + ' publications',\n str(searchdata[0]['spacegroup']) + ' spacegroup',\n str(searchdata[0]['band_gap']) + ' band gap',\n searchdata[0]['searchURL'],\n '=HYPERLINK(\"' + imgpath + '\",\"Word Cloud\")'])\n linenum += 1\n\n conline = [\n '=HYPERLINK(\"[' + fulltitle + ']' + searchname + 'Full' + '!A' + str(linenum) + '\",\"' +\n searchdata[0]['pretty_formula'] + '\")',\n\n str(searchdata[0]['numResults']),\n str(searchdata[0]['spacegroup']),\n str(searchdata[0]['band_gap'])]\n\n fwriter.writerow([])\n linenum += 1\n\n for key in keyresults[i].keys():\n keyrow = []\n conkeynum = 0\n for n in range(len(keyresults[i][key])):\n paper = keyresults[i][key][n]\n if paper != 0:\n cellstring = '=HYPERLINK(\"' + searchdata[1][n]['DOIlink'] + '\",\"' + key + '(' + str(\n paper) + ')\")'\n keyrow.append(cellstring)\n conkeynum += 1\n if keyrow:\n fwriter.writerow(keyrow)\n linenum += 1\n if conkeynum != 0:\n constring = '=HYPERLINK(\"[' + fulltitle + ']' + searchname + 'Full' + '!A' + str(\n linenum) + '\",\"' + str(conkeynum) + '\")'\n conline.append(constring)\n else:\n conline.append('')\n\n cwriter.writerow(conline)\n\n fwriter.writerow([])\n fwriter.writerow([])\n linenum += 2\n\n return json.dumps([os.path.join(os.getcwd(), 'results', 'materialsSearchCSV-WC', searchname)])", "def csv_header(html_path):\n yield 'ideDocumento' # this field is missing from the reference\n with open(html_path, 'rb') as file_handler:\n parsed = BeautifulSoup(file_handler.read(), 'lxml')\n for row in parsed.select('.tabela-2 tr'):\n try:\n yield row.select('td')[0].text.strip()\n except IndexError:\n pass", "def df2html(df, name=None, dom=\"Brt\", show_index=False, pageLength=15):\n\n if name is None:\n name = uuid.uuid1().time_low\n # looks like datatable does not like ID made of numbers, even in string\n # so we convert to ABCDEFGH values\n name = \"\".join([chr(65 + int(x)) for x in str(name)])\n\n datatable = DataTable(df, name, index=show_index)\n datatable.datatable.datatable_options = {\n \"pageLength\": pageLength,\n \"scrollCollapse\": \"false\",\n \"dom\": dom,\n \"buttons\": [\"copy\", \"csv\"],\n }\n\n # identify links (columns ending in _links)\n df.columns = [str(x) for x in df.columns]\n for column in df.columns:\n if column.endswith(\"_links\"):\n prefix = column.replace(\"_links\", \"\")\n if prefix in df.columns:\n datatable.datatable.set_links_to_column(column, prefix)\n\n js = datatable.create_javascript_function()\n html = datatable.create_datatable(float_format=\"%.6g\")\n return js + html", 
"def output_to_html(string_data):\n raise NotImplementedError(\"This function is not yet Implemented!\")", "def convert_to_html(self, news_list):\n self.logger.info(\"Converting news to HTML...\")\n self.prepare_storage()\n self.process_news_list_with_images(news_list)\n content = self.generate_html_template(news_list)\n self.write_to_file(content.encode(\"UTF-8\"))", "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n return f'<a href=\"data:file/csv;base64,{b64}\">Download csv file</a>'", "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(\n csv.encode()\n ).decode() # some strings <-> bytes conversions necessary here\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"yourownquery.csv\">Download Your very Own Query Searched csv file!!!</a>'", "def html(input):\n output=atpic.cleaner_alex.clean(input)\n return output", "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(\n csv.encode()\n ).decode() # some strings <-> bytes conversions necessary here\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"pattern.csv\">Download csv file</a>'", "def print_csv():\n # read lines, and make the first a link\n show_played = request.args.get('showPlayed', 'true') == 'true'\n show_out_of_office = request.args.get('showOutOfOffice', 'true') == 'true'\n songs = database.load_songs(include_played=show_played, include_out_of_office=show_out_of_office)\n entries = [_convert_first_href(str(x)) for x in songs]\n header_line = \"YouTube Link,Played,Song Name,Added by\\n\"\n return \"%s%s\" % (header_line, \"\\n\".join(entries))", "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(\n csv.encode()\n ).decode() # some strings <-> bytes conversions necessary here\n return f'<a href=\"data:file/csv;base64,{b64}\" download=\"animalquery.csv\">Download csv file</a>'", "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode() # some strings <-> bytes conversions necessary here\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"results.csv\">Download csv file</a>'\n return href", "def to_html(content):\n headers = content[0].keys()\n rows = (r.values() for r in content)\n return html_table(headers, rows)", "def csvprint(self, data):\n import csv\n import sys\n # self._assert(data) CSV data row lenght can vary\n data = self._render(data) # make elements ascii\n writer = csv.writer(sys.stdout, delimiter=',',\n quotechar='\"', \n quoting=csv.QUOTE_MINIMAL,\n lineterminator=\"\\n\")\n for row in data: writer.writerow(row)", "def get_table_download_link(df):\n csv = df.to_csv(index=False)\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(csv.encode()).decode()\n href = f'<a href=\"data:file/csv;base64,{b64}\" download=\"download.csv\">Download csv file</a>'\n return href", "def make_html_table(filelist):\n pre=open(\"HTML_Header.txt\").read()\n out=[]\n for file in filelist:\n x=load_file(file)[1]\n out.append(\"<tr>\")\n out.append(\"<th>{}</th>\".format(x[0]))\n out.append(\"<th>{}</th>\".format(x[2]))\n out.append(\"<th>{}</th>\".format(x[1]))\n out.append(\"<th>{}</th>\".format(x[6]))\n out.append(\"<th>{}</th>\".format(x[7]))\n out.append(\"<th>{}</th>\".format(x[8]))\n out.append(\"<th>{}</th>\".format(x[9]))\n out.append(\"<th>{}</th>\".format(x[12]))\n 
out.append(\"<th>link</th>\")\n out.append(\"</tr>\")\n out.append(\"</table>\")\n \n for i in range(0,len(out)):\n pre=pre+out[i]+\"\\n\"\n \n path=os.getcwd()\n os.chdir(\"Ausgabe\")\n open(\"table.html\",\"w\").write(pre)\n os.chdir(path)" ]
[ "0.7737987", "0.7237648", "0.62846726", "0.626731", "0.6242982", "0.6178104", "0.5997247", "0.57185525", "0.563922", "0.55996096", "0.5588967", "0.55867475", "0.55619586", "0.5535946", "0.5524614", "0.5508425", "0.5443588", "0.5392276", "0.53839266", "0.53739244", "0.5363368", "0.53553295", "0.5349607", "0.53411794", "0.53351927", "0.5292906", "0.52845514", "0.5278845", "0.5260912", "0.5257407" ]
0.8081197
0
Generate the name of the transformed feature from original name.
def _transformed_name(key: Text) -> Text:
    return key + "_xf"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_file_name(old_file_name: str) -> str:\r\n return old_file_name.split(\".\")[0] + '_features' + '.npy'", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def get_feature_suffix(feature_name: str) -> str:\n if \"_\" not in feature_name:\n return \"\"\n return feature_name.split(\"_\")[-1]", "def name(self):\n name = self.function_name\n\n # Feature type is based on additional data that used\n # for example if insight is for Healthsites Facilities\n # than feature type is Healthsites Facilities\n\n if self.feature_type:\n name = '%s for %s' % (name, self.feature_type)\n return name", "def name(self):\n return f\"{self._name.replace('_', ' ')}\".title()", "def get_name() -> str:", "def get_name():", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str = FEATURE_NAME_SUFFIX) -> str:\n key = name.replace(\".\", \"__ludwig_punct_period__\")\n return key + feature_name_suffix", "def name(self):\n return self._tf.name", "def get_file_name(x, feature_name, ext='npy'):\n # this is kind-of standard\n name = '.'.join(x.split('.')[:-1])\n filename = '{}.{}.{}'.format(name, feature_name, ext)\n return filename", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str=FEATURE_NAME_SUFFIX) ->str:\n key = name.replace('.', '__ludwig_punct_period__')\n return key + feature_name_suffix", "def asName(self, name):\r\n\t\tnewName = \"\"\r\n\t\ttoHigher = False\r\n\t\tfor char in name:\r\n\t\t\tif char in \"_-\":\r\n\t\t\t\ttoHigher = True\r\n\t\t\telse:\r\n\t\t\t\tif toHigher:\r\n\t\t\t\t\tnewName = newName + char.upper()\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewName = newName + char\r\n\t\t\t\ttoHigher = False\r\n\t\treturn newName", "def get_singlet_name(orig_name):\n return \"singlet_{}\".format(orig_name)", "def get_name():\n return \"SVMd+ - simplified approach\"", "def get_name():\n return \"SVM+\"", "def name(self):\n return 'data_extraction_for_' + '_'.join(self.names).lower()", "def simplifyOutName(name):\n return \"HLTNav_\" + name.replace(\"HLTNav_\", \"\").replace(\"Trig\", \"\").replace(\"Alg\", \"\")", "def get_name():\n return \"SVMd+\"", "def wrapper_function_name(text):\n text = GLGenerator.split_to_body_and_ext(text)\n body = text[0]\n ext = text[1]\n for suffix, replacement in FUNCTION_SUFFIXES.items():\n if body.endswith(suffix):\n body = body[:-len(suffix)] + replacement\n break\n text = body + ext\n res = util.to_snake_case(text[2:])\n return res", "def process_name(self, name, inverse=False):\n if inverse:\n return name.replace('_', ' ').title()\n return name.lower().replace(' ', '_').replace('.', '')", "def get_name() -> str:\n pass", "def TransformNames(self) -> _n_2_t_0[str]:", "def get_name(self):\n return self.normalize_name(self.name)", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():" ]
[ "0.7340735", "0.71272826", "0.6960715", "0.68877894", "0.68262595", "0.6702207", "0.6644604", "0.6562945", "0.65487856", "0.6546978", "0.65308523", "0.64949733", "0.648802", "0.6484703", "0.6465068", "0.6454453", "0.644879", "0.6445619", "0.64169896", "0.6416392", "0.63704455", "0.6369942", "0.63684475", "0.63462657", "0.63385147", "0.63385147", "0.63385147", "0.63385147", "0.63385147", "0.63385147" ]
0.7195298
1
Initialize a new Hex game, and return it
def get_new_game(game_config):
    _type = game_config["game_type"]
    if _type == "hex":
        game = Hex(game_config["hex"], verbose=game_config["verbose"])
    else:
        raise ValueError("Game type is not supported")
    return game
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_new_game(self):\n self.game = get_new_game(self.game_config)", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def __init__(self):\n self.die_a = die_class.Die(self.angry_die_faces)\n self.die_b = die_class.Die(self.angry_die_faces)\n self.game_stage = 1\n self.just_cheated_a = False\n self.just_cheated_b = False\n self.game_won = False", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP))\n self.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK))", "def init_game():\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)", "def init(seed=None):\n\tglobal _game\n\n\tfrom .game import Game\n\tfrom .prompt import install_words\n\n\t_game = Game(seed)\n\tload_advent_dat(_game)\n\tinstall_words(_game)\n\t_game.start()\n\treturn _game", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def make_game(self):\n game = Game(self.data['gamename'])\n self.game = game\n return game", "def new_game(self):\n\n self.board = {}", "def new_game(self):\n self.ui = UI()\n self.board.retract_board()\n self.board = Board()\n self.turn = BLUE\n self.selected_legal_moves = []\n self.selected_piece = None", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.done = False\n # ship = random.choice(list(prepare.GFX[\"ships\"].values()))\n ship = list(prepare.GFX[\"ships\"].values())[7] # pick first ship available\n self.player = actors.Player((0, 0), ship)\n self.level = level.Level(self.screen_rect.copy(), self.player)\n\n self.energyloss_counter = 0\n self.energygain_counter = 0", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, i, 2, \"wpawn\"+str(i)))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, i, 7, \"bpawn\"+str(i)))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'a', 1, \"wrook0\"))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'b', 1, \"wknight0\"))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'c', 1, \"wbishop0\"))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, 'd', 1, 
\"wqueen\"))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, 'e', 1, \"wking\"))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'f', 1, \"wbishop1\"))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'g', 1, \"wknight1\"))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'h', 1, \"wrook1\"))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'a', 8, \"brook0\"))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'b', 8, \"bknight0\"))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'c', 8, \"bbishop0\"))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, 'd', 8, \"bqueen\"))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, 'e', 8, \"bking\"))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'f', 8, \"bbishop1\"))\n self.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'g', 8, \"bknight1\"))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'h', 8, \"brook1\"))", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def game_start():\n herolist = Hero_List(hots_db)\n heroclasses = []\n for item in herolist:\n heroclasses.append(Item(item, 'hero'))\n curgame = Game(Team('home'), Team('enemy'), Team('hero_pool', heroclasses), '')\n return curgame", "def setup_game(self):", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def initGame(width=19):\n state = np.zeros((width, width, 2))\n available = np.zeros((width, width))\n\n return state, available", "def _initialize_game(wager_credits):\n player = {}\n player['chips'] = wager_credits\n player['round'] = 0\n player['blackjack'] = 0\n player['won'] = 0\n player['lost'] = 0\n player['push'] = 0\n player['bust'] = 0\n return player", "def make_game():\n return ascii_art.ascii_art_to_game(\n GAME_ART, what_lies_beneath='.',\n sprites={'P': PlayerSprite})", "def start_state():\n return chess.Board()", "def new_game(self):\n self.board = [None] * 9\n self.player = \"X\"\n self.winner = None", "def __init__(self):\n self.game_screen = pygame.display.set_mode((GameData.screen_dim, GameData.screen_dim))\n self.game_screen.fill(GameData.background_color)\n self.player = 1\n self.game_over = False\n self.board = np.zeros((GameData.rows, GameData.columns))", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def __init__(self):\n self._current_state = \"UNFINISHED\"\n self._start_color = \"RED\"\n self._board = Board()", "def initial_state(self):\n state = GameState(self.size)\n return state", "def __init__(self, turn, game):\n\t\tself.turn = turn\n\t\tself.game = game\n\t\tself.gameBoard = game.returnBoard()", "def new_game(self) -> \"State\":\n return State(self, self.__sim.new_game())", "def __init__(self):\n\t\tself.current = Piece.EX\n\t\tself.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK, Piece.BLANK]", "def _init_game(self):\n state, player_id = self.game.init_game()\n if self.record_action:\n self.action_recorder = []\n return self._extract_state(state), player_id", "def __init__(self):\n self.opening_scene = DungeonGate()\n # this list define the order 
of scenes in the corridor\n self.corridor_scenes = [GuardsRoom(), Cell(), Armory(), EmptyRoom(), Dormitory()]\n shuffle(self.corridor_scenes)\n self.explored_scenes = {\n \"GuardsRoom\": \"unexplored\",\n \"Cell\": \"unexplored\",\n \"Dormitory\": \"unexplored\",\n \"Armory\": \"unexplored\",\n \"EmptyRoom\": \"unexplored\",\n \"DungeonGate\": \"unexplored\"\n }" ]
[ "0.64138275", "0.6336094", "0.6329723", "0.6318369", "0.6295797", "0.6237595", "0.62131053", "0.61938787", "0.61470306", "0.61469275", "0.61450803", "0.61135393", "0.60844964", "0.60540015", "0.60495573", "0.6031003", "0.5994315", "0.5986799", "0.5981197", "0.5936789", "0.5931988", "0.59311175", "0.59288913", "0.5889255", "0.58643013", "0.5860408", "0.58573735", "0.5815535", "0.58106786", "0.58086693" ]
0.6521602
0
Attempts to read auth models go to auth_db.
def db_for_read(self, model, **hints): if self.isAdminApp(model): return 'auth_db' return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auth(session, db):\n\tif (session.auth != None) and db(db.User.id == session.auth).count() == 1:\n\t\treturn User(session.auth, db)\n\telse:\n\t\treturn None", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def get_auth_db(authdb_path,\n database_metadata=AUTHDB_META,\n echo=False):\n\n # if this is an SQLite DB, make sure to check the auth DB permissions before\n # we load it so we can be sure no one else messes with it\n potential_file_path = authdb_path.replace('sqlite:///','')\n\n if os.path.exists(potential_file_path):\n\n fileperm = oct(os.stat(potential_file_path)[stat.ST_MODE])\n\n #if not (fileperm == '0100600' or fileperm == '0o100600'):\n # raise IOError('incorrect permissions on auth DB, will not load it')\n\n @event.listens_for(Engine, \"connect\")\n def set_sqlite_pragma(dbapi_connection, connection_record):\n cursor = dbapi_connection.cursor()\n cursor.execute(\"PRAGMA foreign_keys=ON\")\n cursor.close()\n\n engine = create_engine(authdb_path, echo=echo)\n database_metadata.bind = engine\n conn = engine.connect()\n\n return engine, conn, database_metadata", "def getauth_from_db(args):\n global logger\n global tool_names\n\n if (args['api_key'] != api_key):\n logger.info('api_key invalid')\n return auth_respond(1,'api_key invalid','')\n \n if args['card_id'] in all_access_card_ids_list:\n # found in all_access_card_ids_list. Grant access\n logger.info('granted: all_access_card')\n return auth_respond(0,'granted: all_access_card','')\n else:\n #\n # search db for card id\n #\n user_db_parsed = get_user_db()\n #sys.stderr.write(pprint.pformat(user_db_parsed) + '\\n')\n found_card_id = found_tool_auth = False \n for u in user_db_parsed:\n (db_card_id,db_user_name,db_tool_auths) = u\n if (db_card_id == args['card_id']):\n #\n # if we found the card id\n #\n found_card_id = True\n db_tool_auth_list = db_tool_auths.split(':')\n\n for db_tool in db_tool_auth_list:\n if db_tool == args['tool_id']:\n found_tool_auth = True\n #\n # and they are authorized\n #\n logger.info('getauth_from_db: found record. 
' + \n ' db_card_id:' + db_card_id +\n ', user_name:' + db_user_name + \n ', tool_auths:' + db_tool_auths\n )\n #\n # then grant access\n #\n return auth_respond(0,'granted',db_user_name,db_tool_auths)\n\n if (not found_card_id):\n return auth_respond(1,'denied: unknown card','')\n if (not found_tool_auth):\n return auth_respond(1,'denied: not authorized','')\n\n return auth_respond(1, 'error: badly-formed request' )", "def init_authsets(clean_db, api_client_mgmt):\n return do_init_authsets(api_client_mgmt)", "def test_models(self):\n\n r = self.app.datastore.find_role('admin')\n self.assertIsNotNone(r)\n self.assertIsInstance(r, Role)\n self.assertEqual('admin', r.name)\n\n u = self.app.datastore.find_user(email='[email protected]')\n self.assertIsNotNone(u)\n self.assertIsInstance(u, User)\n self.assertEqual('[email protected]', u.email)\n self.assertTrue(u.has_role('admin'))", "def user_model(self): \n return self.auth.store.user_model", "def load_user():\n\n for i, row in enumerate(open(\"seed_data/role.user\")):\n row = row.rstrip()\n name, description = row.split(\"|\")\n role = RoleModel(name=name, description=description)\n db.session.add(role)\n\n for i, row in enumerate(open(\"seed_data/user.user\")):\n row = row.rstrip()\n name, phone, email, password, confirmed_at, role_id = row.split(\"|\")\n user = UserModel(name=name,\n phone=phone,\n email=email,\n password=password,\n confirmed_at=confirmed_at,\n role_id=role_id)\n db.session.add(user)\n\n # for i, row in enumerate(open(\"seed_data/order.user\")):\n # row = row.rstrip()\n # active, user_id, product_location_id = row.split(\"|\")\n # order = OrderrModel(\n # active=active, \n # user_id=user_id, \n # product_location_id=product_location_id)\n # db.session.add(order)\n\n db.session.commit()", "def setup_read(self, db, randomize_access=False):\n self.db = db\n self.randomize_access = randomize_access\n self.can_read = True", "def load_initial_data(apps, schema_editor):\n\n\n #\n # get the model by name\n User = apps.get_model('auth', 'User')\n password = User.objects.make_random_password()\n\n\n draftboard = User()\n draftboard.username= settings.USERNAME_DRAFTBOARD\n draftboard.password = make_password(password)\n draftboard.is_superuser = False\n draftboard.is_staff = True\n draftboard.save()\n\n escrow = User()\n escrow.username = settings.USERNAME_ESCROW\n escrow.password= make_password(password)\n escrow.is_superuser = False\n escrow.is_staff = True\n escrow.save()", "def test_read_user_identity_mapping(self):\n pass", "def get_all_auths(self):\n return self.all_auths", "def authentication(app, user_model):\n login_manager.login_message = \"Please login to access this page.\"\n login_manager.login_view = 'auth.login'\n login_manager.session_protection = 'strong'\n login_manager.login_message_category = 'danger'\n\n @login_manager.user_loader\n def load_user(user_id):\n return user_model.query.get(int(user_id))", "def models(config_path):\n autograder.setup_app(config_path)\n\n # Now that setup has occurred, we can import the models\n from autograder import models as m\n\n # Make sure that if we've used a different db setup in another module\n # we don't keep trying to write to that database\n m.db.session.remove()\n\n m.drop_all()\n m.create_all()\n return m", "def get_auths(self):\n return self.__auths", "def _auth_metadata_get(self, auth_id):\n auth_metadata = self._metadata_get(self._auth_metadata_path(auth_id))\n\n if auth_metadata:\n self._check_compat_version(auth_metadata['compat_version'])\n\n return 
auth_metadata", "def _init_auth(\n self,\n auth: t.Optional[AuthModel] = None,\n log_level: t.Union[str, int] = LOG_LEVEL_AUTH,\n ) -> AuthModel:\n if not isinstance(auth, AuthModel):\n if self.CREDENTIALS:\n auth: AuthCredentials = AuthCredentials(\n username=self.__key,\n password=self.__secret,\n http=self.http,\n log_level=log_level,\n )\n else:\n auth: AuthApiKey = AuthApiKey(\n key=self.__key,\n secret=self.__secret,\n http=self.http,\n log_level=log_level,\n )\n return self._check_binding(auth)", "def __init__(self):\n self.db = ALL_USERS", "def iDb(self):\n try:\n self.db.importDb()\n self.accept()\n except PermissionError:\n self.reject()", "def check_auth():", "def init():\n create_user(app)\n get_all_user()", "def before_request():\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user", "def before_request():\n g.db = models.DATABASE\n g.db.connect()\n g.user = current_user", "def retrieve_from_db(self):\n pass", "def test_retrieve(self):\n users = CalendallUser.objects.all()\n self.check_attrs_helper(users)", "def find_model(self, models):\n \"\"\"maps to model fields\"\"\"\n self.model = models.get_by_name(self.model.name)\n for f in self.included_fields:\n\n if f.is_model_field() and f.name != 'username':\n f._django_field = self.model.fields.get_by_name(f.model_field.name)\n f.post_name = f._django_field.identifier()\n else:\n assert f.name in ['username', 'password', 'password1', 'password2'] # these are the only non model fields we support\n f._django_field = None\n f.post_name = f.name", "def auths(self):\n return self._auths", "def auths(self):\n return self._auths", "def test_authenticated_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').read,\r\n token)", "def init_db(self):\n\n cursor = self._db_connection.cursor()\n\n cursor.execute('''\n CREATE TABLE IF NOT EXISTS users (\n uid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n username VARCHAR(255) UNIQUE NOT NULL,\n full_name VARCHAR(255),\n password VARCHAR(255) NOT NULL\n );\n \n CREATE TABLE IF NOT EXISTS access_control (\n access_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n uid INTEGER NOT NULL,\n privilege VARCHAR(255) NOT NULL,\n UNIQUE (uid, privilege),\n CONSTRAINT fk_associated_user FOREIGN KEY (uid) REFERENCES users (uid)\n );\n \n CREATE TABLE IF NOT EXISTS tokens (\n token_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n token_content VARCHAR(255) NOT NULL,\n expiration_datetime VARCHAR(255),\n uid INTEGER NOT NULL,\n CONSTRAINT fk_associated_user FOREIGN KEY (uid) REFERENCES users (uid)\n );\n \n CREATE TABLE IF NOT EXISTS courses (\n course_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n course_abbreviation VARCHAR(255) NOT NULL,\n course_name VARCHAR(255) NOT NULL,\n instructor_id INTEGER NOT NULL,\n time VARCHAR(255) NOT NULL,\n seats INTEGER NOT NULL,\n CONSTRAINT fk_instructors FOREIGN KEY (instructor_id) REFERENCES users (uid)\n );\n \n CREATE TABLE IF NOT EXISTS enrollment_records (\n enrollment_id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n uid INTEGER NOT NULL,\n course_id INTEGER NOT NULL,\n grade NUMERIC NOT NULL DEFAULT 100.0,\n UNIQUE (uid, course_id),\n CONSTRAINT fk_associated_user FOREIGN KEY (uid) REFERENCES users (uid),\n CONSTRAINT fk_associated_course FOREIGN KEY (course_id) references courses (course_id)\n );\n ''')" ]
[ "0.56026596", "0.5539494", "0.54292023", "0.5427826", "0.5398735", "0.53198487", "0.52996194", "0.5299112", "0.525122", "0.52342856", "0.5227345", "0.5202427", "0.5201855", "0.5185841", "0.5167866", "0.5156039", "0.51369697", "0.51359546", "0.51079786", "0.50896907", "0.5083385", "0.5079477", "0.5079477", "0.50780946", "0.5075834", "0.505879", "0.505079", "0.505079", "0.5048215", "0.5046736" ]
0.611817
0
Allow relations if a model in the auth app is involved.
def allow_relation(self, obj1, obj2, **hints): if self.isAdminApp(obj1) or self.isAdminApp(obj2): return True return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allow_relation(self, obj1, obj2, **hints):\n\n result = (obj1._meta.model_name in DefaultRouting.defaultModels and \n obj2._meta.model_name in DefaultRouting.defaultModels)\n return result", "def allow_relation(self, obj1, obj2, **hints):\n return True", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'data_collection' or \\\n obj2._meta.app_label == 'data_collection':\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n\n result = False\n if not (obj1._meta.model_name in GeoSpatialRouting.includedModels and \n obj2._meta.model_name in GeoSpatialRouting.includedModels) :\n result = None\n return result", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'eotrts_student' or \\\n obj2._meta.app_label == 'eotrts_student':\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label in self.route_app_labels or obj2._meta.app_label in self.route_app_labels:\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n\t\tif obj1._meta.app_label == 'product' or \\\n\t\t obj2._meta.app_label == 'product':\n\t\t return True\n\t\treturn None", "def allow_relation(self, obj1, obj2, **hints):\n if (\n obj1._meta.app_label in self.route_app_labels or\n obj2._meta.app_label in self.route_app_labels\n ):\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == self.app_label or \\\n obj2._meta.app_label == self.app_label:\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if (obj1._meta.app_label == obj2._meta.app_label):\n return True\n else:\n return None", "def allow_relation(self, obj1, obj2, **hints):\r\n if obj1._meta.app_label == self.APP_LABEL or obj2._meta.app_label == self.APP_LABEL:\r\n return True\r\n return None", "def allow_relation(self, obj1, obj2, **hints):\n return self._route_by_model_type(obj1) == self._route_by_model_type(obj2)", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'test' or \\\n obj2._meta.app_label == 'test':\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n\n if obj1._state.db == obj2._state.db:\n return True\n return False", "def allow_relation(self, obj1, obj2, **hints):\n if (\n obj1._meta.label_lower in self.route_encuestas or\n obj2._meta.label_lower in self.route_encuestas\n ):\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'emissions' or \\\n obj2._meta.app_label == 'emissions':\n return True\n return None", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.app_label == 'researcherquery' and obj2._meta.app_label == 'researcherquery':\n return True\n return None", "def can_manage_relationship_aliases(self):\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._state.db in self.pool and obj2._state.db in self.pool:\n return True\n return None", "def isRelated(self):\n return len(self.user_storage.all()) > 0", "def can_create_relationships(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_create_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True", "def 
relation_exists(cls, model):\n return bool(cls.get_related_field(model)\n or cls.get_reverse_related_field(model))", "def allow_relation(self, obj1, obj2, **hints):\n if obj1._meta.db_tablespace == 'emarket' or \\\n obj2._meta.db_tablespace == 'emarket':\n return True\n return None", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','admins'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','admins'):\n abort(403)", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create', 'advisors'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit', 'advisors'):\n abort(403)", "def can_delete_relationships(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_delete_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True", "def on_model_change(self, form, model, is_created):\n if not current_user.is_active or not current_user.is_authenticated:\n abort(403)\n elif is_created:\n if not user_has_permission(current_user, 'can_create','specialties'):\n abort(403)\n else:\n if not user_has_permission(current_user, 'can_edit','specialties'):\n abort(403)", "def save_model(self, request, obj, form, change):\n try:\n owner = form.instance.owner\n except models.Application.owner.RelatedObjectDoesNotExist:\n form.instance.owner = request.user\n\n super().save_model(request, obj, form, change)", "def allow_relation(self, obj1, obj2, **hints):\n\n object1_databases = get_possible_databases_for_model(model=obj1._meta.model)\n object2_databases = get_possible_databases_for_model(model=obj2._meta.model)\n\n if (len(object1_databases) == len(object2_databases) == 1) and (object1_databases == object2_databases):\n return True\n return self.get_shard_for_instance(obj1) == self.get_shard_for_instance(obj2)" ]
[ "0.70190156", "0.6990835", "0.6919564", "0.6906994", "0.69009876", "0.6783787", "0.67391515", "0.67210835", "0.6704985", "0.6668281", "0.6638815", "0.6542443", "0.6541121", "0.65383524", "0.64997756", "0.64021367", "0.6397948", "0.63407576", "0.624081", "0.6170405", "0.600265", "0.59896773", "0.5952329", "0.5881211", "0.5737481", "0.5671514", "0.56571287", "0.5606396", "0.5554369", "0.5545847" ]
0.7264568
0
Find files changed in certain revisions. The function passes `revish` directly to `git diff`, so `revish` can have a variety of forms; see `git diff help` for details. Files in the diff that are matched by `ignore_rules` are excluded.
def files_changed(revish: Text, ignore_rules: Optional[Sequence[Text]] = None, include_uncommitted: bool = False, include_new: bool = False ) -> Tuple[List[Text], List[Text]]: files = repo_files_changed(revish, include_uncommitted=include_uncommitted, include_new=include_new) if not files: return [], [] return exclude_ignored(files, ignore_rules)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def changes(self, files=[], rev=None, change=None, text=False,\n reverse=False, ignore_all_space=False, ignore_space_change=False,\n ignore_blank_lines=False, context=None, subrepos=False,\n include=None, exclude=None): \n return diffparser.parse(self.diff(files=files, rev=rev, change=change,\n text=text, git=True, reverse=reverse,\n ignore_all_space=ignore_all_space,\n ignore_space_change=ignore_space_change,\n ignore_blank_lines=ignore_blank_lines,\n unified=context, subrepos=subrepos,\n include=include, exclude=exclude))", "def diff(self, files=[], rev=None, change=None, text=False,\n git=False, nodates=False, show_function=False, reverse=False,\n ignore_all_space=False, ignore_space_change=False,\n ignore_blank_lines=False, unified=None,\n stat=False, subrepos=False, include=None, exclude=None):\n if change and rev:\n raise ValueError('cannot specify both change and rev')\n\n files = self._map_files(files)\n rev = self._map_revs(rev)\n change = self._map_one_rev(change)\n\n out = self._client.execute('diff', files, r=rev, c=change,\n a=text, g=git, nodates=nodates,\n p=show_function, reverse=reverse,\n w=ignore_all_space, b=ignore_space_change,\n B=ignore_blank_lines, U=unified, stat=stat,\n S=subrepos, I=include, X=exclude,\n binary=True)\n\n return out", "def get_changed_paths(*args, globs=None):\n if globs:\n args = list(args) + [\"--\", *globs]\n diff_output = git(\"diff\", \"--name-only\", *args)\n\n return set([line.strip() for line in diff_output.splitlines()])", "def analyze_revs(target_pathid, url, begin=1, end=None,\n find_reflected=False):\n\n begin = str(begin)\n if end is None:\n end = \"HEAD\"\n else:\n end = str(end)\n if long(begin) > long(end):\n return RevisionSet(\"\"), RevisionSet(\"\"), \\\n RevisionSet(\"\"), RevisionSet(\"\")\n\n logs[url] = RevisionLog(url, begin, end, find_reflected)\n revs = RevisionSet(logs[url].revs)\n\n if end == \"HEAD\":\n # If end is not provided, we do not know which is the latest revision\n # in the repository. 
So return the phantom revision set only up to\n # the latest known revision.\n end = str(list(revs)[-1])\n\n phantom_revs = RevisionSet(\"%s-%s\" % (begin, end)) - revs\n\n if find_reflected:\n reflected_revs = logs[url].merge_metadata().changed_revs(target_pathid)\n reflected_revs += logs[url].block_metadata().changed_revs(target_pathid)\n else:\n reflected_revs = []\n\n initialized_revs = RevisionSet(logs[url].merge_metadata().initialized_revs())\n reflected_revs = RevisionSet(reflected_revs)\n\n return revs, phantom_revs, reflected_revs, initialized_revs", "def diff(root_path, verbose, ignore_list, ignore_spec_file):\n diff_entire_folder_against_full_history_subcommand(root_path, verbose, ignore_list, ignore_spec_file)\n return", "def _on_watch_changes(self, *changes):\n self.dirty = self._git.is_dirty()\n if self._watcher:\n for change in self._watcher.changes:\n for tracker in self._trackers:\n tracked_path = Path(self._git.working_dir) / change[\"path\"]\n if tracker.path.resolve() == tracked_path.resolve():\n tracker._on_file_change(None)\n return [\n dict(a_path=diff.a_path, b_path=diff.b_path, change_type=diff.change_type)\n for diff in self._git.index.diff(None)\n ] + [\n dict(a_path=None, b_path=ut, change_type=\"U\")\n for ut in self._git.untracked_files\n ]", "def svn_diff_file_diff(*args):\n return _diff.svn_diff_file_diff(*args)", "def git_get_modified_files(\n paths: Iterable[Path], revrange: RevisionRange, cwd: Path\n) -> Set[Path]:\n relative_paths = {p.resolve().relative_to(cwd) for p in paths}\n str_paths = [path.as_posix() for path in relative_paths]\n if revrange.use_common_ancestor:\n rev2 = \"HEAD\" if revrange.rev2 == WORKTREE else revrange.rev2\n merge_base_cmd = [\"merge-base\", revrange.rev1, rev2]\n rev1 = _git_check_output_lines(merge_base_cmd, cwd)[0]\n else:\n rev1 = revrange.rev1\n diff_cmd = [\n \"diff\",\n \"--name-only\",\n \"--relative\",\n rev1,\n # revrange.rev2 is inserted here if not WORKTREE\n \"--\",\n *str_paths,\n ]\n if revrange.rev2 != WORKTREE:\n diff_cmd.insert(diff_cmd.index(\"--\"), revrange.rev2)\n lines = _git_check_output_lines(diff_cmd, cwd)\n if revrange.rev2 == WORKTREE:\n ls_files_cmd = [\n \"ls-files\",\n \"--others\",\n \"--exclude-standard\",\n \"--\",\n *str_paths,\n ]\n lines.extend(_git_check_output_lines(ls_files_cmd, cwd))\n changed_paths = (Path(line) for line in lines)\n return {path for path in changed_paths if should_reformat_file(cwd / path)}", "def _get_changed_files():\n if not ci_diff_helper:\n return None\n\n try:\n config = ci_diff_helper.get_config()\n except OSError: # Not on CI.\n return None\n\n changed_files = ci_diff_helper.get_changed_files('HEAD', config.base)\n\n changed_files = set([\n './{}'.format(filename) for filename in changed_files])\n\n return changed_files", "def bisect_revisions():\n result = run(\n [\"git\", \"bisect\", \"visualize\", \"--oneline\"],\n stdout=PIPE,\n stderr=PIPE,\n encoding=\"utf-8\",\n )\n result.check_returncode()\n lines = result.stdout.splitlines()\n interesting = [line for line in lines if \"refs/bisect/skip\" not in line]\n # the earliest known bad commit will be included in the bisect view\n return len(interesting) - 1", "def diff(self, rev=None):\r\n args = []\r\n if rev is not None:\r\n args.append(\"-r %d\" % rev)\r\n out = self._authsvn('diff', args)\r\n return out", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def svn_diff_diff(*args):\n return _diff.svn_diff_diff(*args)", "def 
get_changed_files(path_to_repository, ignore_subrepositories):\n diff = _get_diff_to_last_commit(path_to_repository, ignore_subrepositories)\n return [item.b_path for item in diff if item.change_type in _CHANGE_TYPES_CONSIDERED_FOR_PRECOMMIT]", "def get_changed_files():\n upstream = \"origin/master\"\n local_commit = subprocess.check_output(\n \"git rev-list HEAD ^{} -- 2>/dev/null | tail -1\".format(upstream),\n shell=True).strip().decode()\n diff_base = subprocess.check_output(\n ['git', 'rev-parse', local_commit +\n '^']).strip().decode() if local_commit else \"HEAD\"\n files = subprocess.check_output(['git', 'diff', '--name-only',\n diff_base]).strip().decode().split('\\n')\n\n repo = subprocess.check_output(['git', 'rev-parse',\n '--show-toplevel']).strip().decode()\n # add prefixes so that all and targets can be specified relative to FUCHSIA_DIR\n if repo.endswith('topaz'):\n files = [os.path.join('topaz', p) for p in files]\n elif repo.endswith('third_party/go'):\n files = [os.path.join('third_party/go', p) for p in files]\n\n return files", "def getMissingRevisionsDiff(self, docId, docRevs):\n return self.client.post(self.name +\"/_revs_diff\", None,\n {docId: docRevs}).getBodyData()", "def _filter_diff(diff, include_list, exclude_list=()):\n filtered = []\n for d in diff:\n if (d.status != 'D' and\n _match_regex_list(d.file, include_list) and\n not _match_regex_list(d.file, exclude_list)):\n # We've got a match!\n filtered.append(d)\n return filtered", "def svn_fs_paths_changed(*args):\r\n return _fs.svn_fs_paths_changed(*args)", "def _get_changes_not_staged_for_commit(wit_path):\n\n files = {os.path.relpath(file, wit_path):\n get_full_path(file, '.wit', 'staging_area')\n for file in _get_all_files_names(wit_path)}\n\n for file in _get_staging_area_files(wit_path):\n if os.path.relpath(file, wit_path) in files:\n yield {os.path.relpath(file, wit_path): _compare_file(file, files[os.path.relpath(file, wit_path)])}", "def diff_cached(rev):\n # We use -z to handle filenames with spaces, tabs, etc.\n cmd = ['git', 'diff', '--cached', '--diff-filter=AM', '--raw', '-z' ]\n if rev:\n cmd.append(rev)\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n # Parse the '\\0' terminated filenames out of the metadata\n output = popen.communicate()[0].split('\\0')\n for i in xrange(0, len(output)-2, 2):\n meta, filename = output[i:i+2]\n yield meta.split() + [ filename ]", "def svn_fs_contents_changed(*args):\r\n return _fs.svn_fs_contents_changed(*args)", "def file_changes(self):\n new = []\n changed = []\n deleted = []\n parent = self.parent_tag\n # Loop through the files and find the ones that have changed\n for relative_path, file_dict in self.checksum[\"files\"].items():\n if relative_path not in parent[\"files\"]:\n new.append(relative_path)\n elif file_dict[\"checksum\"] != parent[\"files\"][relative_path][\"checksum\"]:\n changed.append(relative_path)\n # Loop through the parent files and see which files have been deleted\n for relative_path in parent[\"files\"].keys():\n if relative_path not in self.checksum[\"files\"]:\n deleted.append(relative_path)\n return {\"new\": new, \"changed\": changed, \"deleted\": deleted}", "def _git_diff_files(ref=\"master\"):\n result = []\n command = [\"git\", \"diff\", \"--name-status\", \"%s\" % (ref)]\n exit_code, output = _execute(command)\n if exit_code != 0:\n print(\"Failed to diff files.\")\n sys.exit(1)\n\n for line in output.decode(\"utf-8\").splitlines():\n parts = line.split(\"\\t\")\n action = parts[0]\n name = parts[-1]\n action = 
action.lower()\n result.append((action, name))\n\n return result", "def svn_client_diff(apr_array_header_t_diff_options, char_path1, svn_opt_revision_t_revision1, char_path2, svn_opt_revision_t_revision2, svn_boolean_t_recurse, svn_boolean_t_ignore_ancestry, svn_boolean_t_no_diff_deleted, apr_file_t_outfile, apr_file_t_errfile, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def detect_changed_files(self) -> list[Path]:\n repos = [(self.open_repo(), self.git_directory)]\n # Check server and api dirs too\n # Normally these are ignored but we need to check these\n if (server_repo_path := Path(self.git_directory, \"Paper-Server\")).exists():\n repos.append((pygit2.Repository(str(server_repo_path)), server_repo_path))\n if (api_repo_path := Path(self.git_directory, \"Paper-API\")).exists():\n repos.append((pygit2.Repository(str(api_repo_path)), api_repo_path))\n changed = []\n for repo, repo_path in repos:\n changed.extend(p.relative_to(self.git_directory) for p in detect_changed_files(repo, repo_path))\n changed.sort()\n return changed", "def _knownrevs(repo, nodes):\n torev = repo.changelog.nodemap.get\n for n in nodes:\n rev = torev(n)\n if rev is not None:\n yield rev", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n files_list.append(output[a].replace('\\n', ''))\n return files_list", "def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. 
Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n 
fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}", "def get_affected_files(allow_limited=True):\n diff_base = None\n if in_travis():\n # In the case of a pull request into a branch, we want to\n # diff against HEAD in that branch.\n if in_travis_pr():\n diff_base = travis_branch()\n else:\n diff_base = local_diff_branch()\n\n if diff_base is not None and allow_limited:\n result = subprocess.check_output(['git', 'diff', '--name-only',\n diff_base])\n print('Using files changed relative to %s:' % (diff_base,))\n print('-' * 60)\n print(result.rstrip('\\n')) # Don't print trailing newlines.\n print('-' * 60)\n else:\n print('Diff base not specified, listing all files in repository.')\n result = subprocess.check_output(['git', 'ls-files'])\n\n # Only return filenames that exist. For example, 'git diff --name-only'\n # could spit out deleted / renamed files. Another alternative could\n # be to use 'git diff --name-status' and filter out files with a\n # status of 'D'.\n filenames = [filename\n for filename in result.rstrip('\\n').split('\\n')\n if os.path.exists(filename)]\n return filenames, diff_base", "def _find_changes(self):\n added = set()\n modified = set()\n existing_files = set()\n for dirpath_str, _, filenames in walk(str(self.path)):\n dirpath = Path(dirpath_str)\n for filename in filenames:\n if filename == DB_FILENAME:\n continue\n abs_filename = (dirpath / filename).absolute()\n if abs_filename in self.entries:\n entry = self.entries[abs_filename]\n existing_files.add(entry)\n st = lstat(str(abs_filename))\n if entry != st:\n modified.add(entry)\n else:\n try:\n entry = HashEntry(abs_filename)\n entry.update_attrs()\n added.add(entry)\n except FileNotFoundError:\n # If file was removed between listing and processing,\n # just treat it as if it never existed\n # We have nothing to compare it to anyway\n pass\n removed = set(self.entries.values()) - existing_files\n return added, removed, modified" ]
[ "0.6097362", "0.5880286", "0.5643552", "0.563514", "0.55972046", "0.54928887", "0.5419477", "0.5408571", "0.5355736", "0.53230184", "0.5308404", "0.5283856", "0.526689", "0.524749", "0.5239555", "0.521562", "0.5209716", "0.5176624", "0.5137223", "0.51304287", "0.5037162", "0.5029366", "0.50091654", "0.5001975", "0.49919736", "0.49786493", "0.49782497", "0.4977058", "0.4967674", "0.49540344" ]
0.7239463
0
Creates a two dimensional n (rows) x m (columns) board filled with zeros 4 x 5 board 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 x 2 board 0 0 0 0 0 0 >>> create_board(4, 5) [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] >>> create_board(3, 2) [[0, 0], [0, 0], [0, 0]]
def create_board(n, m): if n == 0 or m == 0: raise IndexError("dimensions cannot both be zero") if n < 0 or m < 0: raise IndexError("dimensions cannot be negative") board = [] rows = [0] * m for i in range(n): board.append(rows) return board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_board(N):\n board = [[0 for x in range(N)] for y in range(N)] \n return board", "def new_board(n: int) -> Board:\n\n return [[0 for _ in range(n)] for _ in range(n)]", "def create_board(rows, columns):\n res = [[0 for i in range(columns)] for j in range(rows)]\n return res", "def makeBoard(m,n):\n if m < 2 or n < 2:\n raise ValueError('Grid must be at least 2x2')\n grid = []\n for y in range(n):\n row = []\n for x in range(m):\n value = 1 if random.randint(0,4) % 4 == 0 else 0\n if x==0 and y==0:\n value = 0\n if x==(m-1) and y==(n-1):\n value = 0\n row.append(value)\n grid.append(row)\n return grid", "def create_board(self):\n # # empty 7x7 board\n # board = [[list() for x in range(7)] for y in range(7)]\n # # coordinates of starting marbles\n # black = [[0, 0], [1, 0], [1, 1], [0, 1], [6, 6], [6, 5], [5, 5], [5, 6]]\n # white = [[6, 0], [6, 1], [5, 1], [5, 0], [0, 6], [0, 5], [1, 5], [1, 6]]\n # red = [[1, 3], [2, 2], [2, 3], [2, 4], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [4, 2], [4, 3], [4, 4], [5, 3]]\n # for marble in white:\n # board[marble[0]][marble[1]] = \"B\"\n # for marble in black:\n # board[marble[0]][marble[1]] = \"W\"\n # for marble in red:\n # board[marble[0]][marble[1]] = \"R\"\n # return board\n pass", "def create_board(board_size):\n board = []\n for i in range(board_size):\n row = []\n for j in range(board_size):\n row.append('-')\n board.append(row)\n return board", "def make_board():\n return [[0 for i in range(8)] for i in range(8)]", "def board_init():\n board = [[[i for i in range(1,n+1)] for j in range(n)] for k in range(n)]\n return board", "def make_board(N):\n assert N >= 1, \"Invalid board dimension\";\n assert type(N) == int, \"N must be an integer\";\n return [[\"*\" for x in range(N)] for x in range(N)];", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def create_board_matrix(self, height, width):\n return [[' '] * width for _ in xrange(height)]", "def createboard(rows,columns):\n row_size = ''\n for rows in range(rows):\n if rows == 0:\n row_size = row_size + '0'\n else:\n row_size = row_size + ',0'\n fullmatrix = ''\n for cols in range(columns):\n if cols == 0:\n fullmatrix = fullmatrix + row_size\n else:\n fullmatrix = fullmatrix + '; ' + row_size\n return fullmatrix", "def create_board(self, dimension):\n\n board = []\n\n for i in range(dimension):\n row = []\n for j in range(dimension):\n row.append(' ')\n board.append(row)\n\n return board", "def createBoard(self):\n self.board = []\n for row in range(self.height):\n self.board += [self.createRow()]\n return self.board", "def createBoard(width, height):\r\n board = []\r\n for i in range(height):\r\n board = board+[createOneRow(width)]\r\n return board\r\n # or\r", "def generate_board(self):\n new_board = []\n for i in range(self.size):\n new_board.append([\"0\"] * self.size)\n return new_board", "def new_board() -> list:\n board = []\n for _ in range(BOARDHEIGHT):\n board.append([BLANK] * BOARDWIDTH)\n\n return board", "def new_board(width=10,height=\"height\"):\n \n if height==\"height\": # if no height given, give the width value\n height = width\n\n # verifies the input\n if height <=0 or width <= 0:\n return\n # create the board\n 
board =[]\n for heig in range(height): # rows\n board.append([])\n for wid in range(width): # lines\n board[heig].append(None)\n return board", "def createBoard(width, height):\n A = []\n for row in range(height):\n A += [createOneRow(width)]\n return A", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def create_board(self, size):\n x = np.arange(0, size[0])\n y = np.arange(0, size[1])\n board = np.meshgrid(x, y)\n return board", "def _make_board(self, rows, cols, top_left):\n board = []\n for i in range(rows):\n board.append([])\n for j in range(cols):\n board[-1].append(\" \")\n\n top_left_row = math.floor(self._num_rows/2)\n top_left_col = math.floor(self._num_cols/2)\n if top_left == \"B\":\n board[top_left_row-1][top_left_col-1] = top_left\n board[top_left_row-1][top_left_col+1-1] = \"W\" \n board[top_left_row+1-1][top_left_col-1] = \"W\" \n board[top_left_row+1-1][top_left_col+1-1] = \"B\" \n elif top_left == \"W\":\n board[top_left_row-1][top_left_col-1] = top_left \n board[top_left_row-1][top_left_col+1-1] = \"B\" \n board[top_left_row+1-1][top_left_col-1] = \"B\" \n board[top_left_row+1-1][top_left_col+1-1] = \"W\"\n\n return board", "def make_board(row_size: int, column_size: int) -> list:\n board = []\n for r in range(row_size): # Creates a list for each row.\n row = []\n for c in range(column_size): # Populates the list with a pair of coords for each row.\n row.append((c, r))\n board.append(row)\n return board", "def create_board(self, size):\n self.board = [\n [FieldState.EMPTY for _ in range(size)]\n for _ in range(size)\n ]", "def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])", "def create_board(width, height):\n a = []\n for row in range(height):\n a += [createOneRow(width)] # gebruik de bovenstaande functie zodat ... één rij is!!\n return a", "def create_board(size) -> list:\n return list(itertools.product([i for i in range(size)], repeat=2))", "def __init__(self,m,n):\n self.columns = m\n self.rows = n\n self.board = makeBoard(m,n)", "def board(constraints):\n rows = len(constraints[0])\n columns = len(constraints[1])\n board = []\n for i in range(rows):\n board.append([Empty for k in range(columns)])\n return board", "def makeBoard(n):\n valid_positions = []\n for i in range(0, n):\n for j in range(0,n):\n valid_positions.append(Position(i,j))\n return valid_positions" ]
[ "0.81044716", "0.79067606", "0.78810954", "0.7877859", "0.7731745", "0.76694953", "0.7603823", "0.7559844", "0.7542552", "0.75009793", "0.74589515", "0.7425115", "0.7358076", "0.72688156", "0.72640264", "0.72328144", "0.7185882", "0.71758914", "0.71044713", "0.7032611", "0.7012673", "0.7010495", "0.70098925", "0.69772166", "0.6921578", "0.69153965", "0.69044334", "0.68965745", "0.68815976", "0.68717283" ]
0.8604134
0
A special invert function that will return 1/x, except in the case that we pass in x = 0, in which case we return 1
def invert(x): try: return 1 / x except ZeroDivisionError as e: print(e) return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invert0(x):\n return 0 if x > 0 else 1", "def invert(x):\n return linalg.inv(x)", "def opposite(x):\n return -1*x", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x", "def negate(x):\n return x ^ 1", "def _invert(x, limits):\n return limits[1] - (x - limits[0])", "def invert(val):\n return -1 * coerce_to_int(val)", "def inverse_sigmoid(x):\n y = -1 * np.log((1-x)/x)\n return y", "def _inverse(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. / (alpha + r)\n beta_h = beta * h\n return x + beta_h * diff", "def inverse_exponential(x):\n return math.exp(-x)", "def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp(0, 1)", "def invert_var(self):\n return 1./self.var", "def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)", "def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)", "def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)", "def inverse(func: Callable):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return 1.0 / func(*args, **kwargs)\n return _wrapper", "def denorm(x):\n out = (x + 1) / 2\n return out.clamp(0, 1)", "def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)", "def inverse(series):\n\n result = 1 / series\n result.name = 'inv ({})'.format(series.name)\n\n return result", "def denorm(x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)", "def denorm(x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)", "def denorm1(x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)", "def inverse(num, r):\n if int(num) == 0:\n return 0\n else:\n # Multiply with every number in the field and\n # check if the result is one. Easy Peasy!\n # Todo: Use Extended Euclidean Algo\n # or Logs/Anti-Logs\n for i in range(1, 256):\n if _multiply(num, i, r) == 1:\n return i", "def inv(self):\n\t\tdeterminant = self.det()\n\t\tif determinant:\n\t\t\treturn self.adj() / determinant\n\t\telse:\n\t\t\traise ValueError(\"Not Invertible\")", "def inverse( self ):\r\n\t\treturn fraction( self.denominator, self.numerator )", "def __invert__(self):\r\n return 1 - self", "def _invert_nonzero(arr):\n arr_inv = arr.copy()\n nz = np.nonzero(arr)\n arr_inv[nz] = 1 / arr[nz]\n return arr_inv", "def inverseintegrate(x, power):\n if power == -1:\n return exp(x)\n else:\n return pow(x*(power+1.), 1./(power+1.))", "def __invert__(self):\n return self.inverse()", "def _invert(G):\n return Surreal.from_value(1 / G._n)" ]
[ "0.84756213", "0.78640175", "0.765517", "0.757828", "0.74859875", "0.6893111", "0.68644345", "0.68165404", "0.6784958", "0.6714442", "0.66958183", "0.6689986", "0.6670409", "0.6670409", "0.6670409", "0.66407967", "0.6587519", "0.65857893", "0.65789133", "0.6560175", "0.6560175", "0.6556248", "0.6514506", "0.6494067", "0.64916646", "0.64635104", "0.64633256", "0.6426578", "0.6416745", "0.6404931" ]
0.87478536
0
Save a boxplot of given data. Boxplot display the minimum and the maximum, quartiles 1, 2 and 3 and percentiles 2th and 98th. Also the mean is displayed as a curve.
def save_box_plot(data, fname="box_plot.pdf", axis_labels=None, plot_title=None, plot_suptitle="None"): figure() transpose = lambda l: [[l[j][i] for j in range(len(l))] for i in range(len(l[0]))] tr_data = transpose(data) # boxplot boxplot(tr_data) # plot the mean as a curve avg = lambda l: sum(l)/len(l) avg_data = map(avg, tr_data) plot(avg_data) # axis labels if not axis_labels == None: xlabel(axis_labels[0]) ylabel(axis_labels[1]) if not plot_title is None: title(plot_title) if not plot_suptitle is None: suptitle(plot_suptitle) savefig(fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def boxplot(values):\n percentiles = percentile(values, [0, 25, 50, 75, 100])\n result = {'min_val': percentiles[0],\n 'q1_val': percentiles[1],\n 'mean_val': percentiles[2],\n 'q3_val': percentiles[3],\n 'max_val': percentiles[4]}\n return result", "def boxplot(L, out_file_name):\n if os.path.exists(out_file_name):\n raise FileExistsError('File already exists.')\n\n mean = math_lib.list_mean(L)\n stdev = math_lib.list_stdev(L)\n fig = plt.figure(dpi=300)\n\n ax = fig.add_subplot(1, 1, 1)\n plt.boxplot(L)\n plt.title(\"mean: {} stdev: {}\".format(mean, stdev))\n plt.ylabel('Box')\n plt.ylabel('Distribution')\n plt.show()\n plt.savefig(out_file_name)", "def boxplot(data, meta, x_label, y_label, title, out_file):\n fig, ax1 = plt.subplots(figsize=(10, 6))\n\n ax1.boxplot(data)\n\n # Hide these grid behind plot objects\n ax1.set_axisbelow(True)\n ax1.set_title(title)\n ax1.set_xlabel(x_label)\n ax1.set_ylabel(y_label)\n\n # set tick labels and export the result\n ax1.set_xticklabels(meta, rotation=90, fontsize=8)\n fig.savefig(out_file, bbox_inches='tight')", "def boxplot_data(data, output_name=\"output.png\"):\r\n xlabels = ['Valence', 'Anger', 'Fear', 'Sadness', 'Joy']\r\n data_to_plt = [\r\n data[:][\"valence_intensity\"], data[:]['anger_intensity'],\r\n data[:]['fear_intensity'], data[:]['sadness_intensity'],\r\n data[:]['joy_intensity']\r\n ]\r\n b_dict = {'patch_artist': True,\r\n 'medianprops': dict(linestyle='-', linewidth=1, color='k')}\r\n plt.figure(figsize=(10, 7))\r\n boxs = plt.boxplot(data_to_plt, labels=xlabels, **b_dict)\r\n plt.title('Distribution of Sentiment')\r\n plt.grid(axis='y')\r\n plt.ylabel('Values')\r\n plt.xlabel('Sentiment')\r\n colors = ['green', 'red', 'purple', 'blue', 'yellow']\r\n for square, color in zip(boxs['boxes'], colors):\r\n square.set_facecolor(color)\r\n plt.plot()\r\n # Only comment below line when debugging. 
Uncomment when submitting\r\n plt.savefig(output_name)", "def test_boxplot(self):\n values = [37, 48, 30, 53, 3, 83, 19, 71, 90, 16, 19, 7, 11, 43, 43]\n result = boxplot(values)\n self.assertEqual(3, result['min_val'])\n self.assertEqual(17.5, result['q1_val'])\n self.assertEqual(37, result['mean_val'])\n self.assertEqual(50.5, result['q3_val'])\n self.assertEqual(90, result['max_val'])", "def data_to_plot(cleaned_data, save_filename=None):\n data_to_plot = cleaned_data.copy()\n foo = data_to_plot['studio'].value_counts()\n data_to_plot[\"studio_num_movies\"] = data_to_plot['studio'].map(foo)\n data_to_plot = data_to_plot[data_to_plot[\"studio_num_movies\"] > 1]\n studios_by_avg_roi_desc = list(data_to_plot.groupby(\"studio\")['ROI'].mean().sort_values(ascending=False).index)\n boxplot = sns.boxplot(x=\"studio\", y=\"ROI\", data=data_to_plot, order=studios_by_avg_roi_desc)\n plt.xticks(rotation=30)\n\n if save_filename:\n boxplot.savefig(save_filename)\n \n return boxplot", "def boxplot(data):\n sns.boxplot(data, width=0.5, palette=\"colorblind\")\n # add points on the plot\n sns.swarmplot(data, color='red', alpha=0.75)", "def boxPlot(self):\n clf()\n boxplot(self.y,positions=self.x,widths=0.5)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('boxplot.png')", "def plot_box_multi(filename, data, classes, classes_unique=None, xlab='x', ylab='y', title='Box-Plot', figwidth=8, figheight=6, ymin=0, ymax=10):\n import matplotlib as mpl\n mpl.use(\"pdf\")\n import matplotlib.pyplot as plt\n data=np.array(data)\n\n #if classes_unique is None:\n # class_unique=np.unique(classes)\n \n data_plot=[]\n for cl in classes_unique:\n data_cl=data[classes==cl]\n data_plot.append(data_cl)\n\n fig=plt.figure(num=1,figsize=(figwidth,figheight))\n ax=fig.add_subplot(1,1,1)\n ax.boxplot(data_plot)\n \n # add some text for labels, title and axes ticks\n ax.set_ylim(ymin,ymax)\n ax.set_ylabel(ylab,fontsize=12)\n ax.set_xlabel(xlab,fontsize=12) \n ax.set_title(title,fontsize=15)\n #ind = np.arange(len(classes_unique))\n #ax.set_xticks(ind)\n ax.set_xticklabels( classes_unique )\n plt.setp(ax.get_xticklabels(), fontsize=12, rotation=90)\n plt.setp(ax.get_yticklabels(), fontsize=12)\n # shrink axis box \n #box = ax.get_position()\n #ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n #ax.legend( method_bar, methods, loc='lower left', bbox_to_anchor=(1.0, 0.3), fontsize=12 )\n #plt.show()\n plt.subplots_adjust(bottom=0.12) # may this is not working because of the following setting\n fig.savefig(filename,bbox_inches='tight')\n plt.close(fig)", "def _boxplot(volas, labels):\n fig = plt.figure(figsize=(12, 8))\n fig.add_subplot(111, ylabel='Volatility')\n plt.ylim(0, 1)\n plt.boxplot(volas, labels=labels)", "def plotBox(self,year,savePlot=False): \n self.df.boxplot(year, by='Region')\n plt.xlabel('Region')\n plt.ylabel('Income')\n if savePlot:\n fileName=\"boxPlot_\"+str(year)+\".pdf\"\n plt.savefig(fileName)\n else:\n plt.show() \n plt.clf()", "def plotBoxplot(data_clust, save=False, *args):\n sous_echantillon = data_clust.copy()\n modalites = sous_echantillon[\"Clusters\"].unique()\n for var in data_clust.columns:\n X = \"Clusters\" # qualitative\n Y = var # quantitative\n groupes = []\n for m in modalites:\n groupes.append(sous_echantillon[sous_echantillon[X] == m][Y].dropna())\n medianprops = {'color':\"black\"}\n meanprops = {'marker':'o', 'markeredgecolor':'black',\n 'markerfacecolor':'firebrick'}\n plt.figure(figsize=[10, 10])\n plt.boxplot(groupes, labels=modalites, 
showfliers=False, medianprops=medianprops,\n vert=False, patch_artist=True, showmeans=True, meanprops=meanprops)\n plt.title(\"Boxplot\")\n plt.xlabel(var)\n plt.ylabel(\"Clusters\")\n if save == True:\n try:\n plt.savefig(args + \"boxplot\" + var + \".png\")\n except NameError:\n print('Missing the path for saving')\n #display the plot\n plt.show()", "def box_plot():\n # download the data from Kaggle: https://www.kaggle.com/camnugent/california-housing-prices/download\n\n df = pd.read_csv('./housing.csv')\n df = df.dropna()\n\n plt.figure(figsize=(10, 6))\n sns.boxplot(data=df, x='ocean_proximity', y='median_house_value', palette='viridis')\n plt.title('Box plot demo w/ Kaggle house pricing data')\n plt.show()\n\n return None", "def combo(L, out_file_name):\n if os.path.exists(out_file_name):\n raise FileExistsError('File already exists.')\n\n mean = math_lib.list_mean(L)\n stdev = math_lib.list_stdev(L)\n fig = plt.figure(dpi=300)\n\n ax = fig.add_subplot(2, 1, 1)\n plt.boxplot(L)\n plt.title(\"mean: {} stdev: {}\".format(mean, stdev))\n plt.ylabel('Box')\n plt.ylabel('Distribution')\n plt.savefig(out_file_name)\n\n ax = fig.add_subplot(2, 1, 2)\n plt.hist(L)\n plt.title(\"mean: {} stdev: {}\".format(mean, stdev))\n plt.xlabel('Value')\n plt.ylabel('Frequency')\n plt.savefig(out_file_name)", "def boxplot_2_features(df, x, y, ylim_i = 0, set_y_limit = False, order_boxplot = False, print_value = False, num_label = 1, save_plot = False, path_dir = None):\n \n value_counts_temp = df[x].value_counts()\n sns.set(font_scale=2)\n f, ax = plt.subplots(figsize=(18, 7));\n if order_boxplot :\n plot =sns.boxplot(x=x, y=y, data=df, order = value_counts_temp.index)\n else:\n plot =sns.boxplot(x=x, y=y, data=df) \n ax.set_title('Boxplot of {} group by {}'.format(y, x));\n plt.xticks(rotation=90);\n if set_y_limit:\n ax.set_ylim(0, ylim_i);\n for ind, label in enumerate(plot.get_xticklabels()):\n if ind % num_label == 0: # every 15th label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n if print_value :\n print(value_counts_temp)\n if save_plot == True:\n plt.savefig((plot_dir + \"boxplot\"+str(y)+\"per _\"+str(x)+\".png\"))\n plt.clf()", "def plot_violin_box(data,\n x,\n y,\n bw=1,\n cut=0,\n scale='area',\n figsize=(10, 5),\n xrot=0,\n yrot=0,\n xlab=float('nan'),\n ylab=float('nan')):\n fig, axes = plt.subplots(1, 2, figsize=figsize)\n plt.xticks(rotation=xrot)\n\n # Violin Plot\n ax = sns.violinplot(x=x, y=y, data=data, palette=\"RdBu\",\n cut=0, ax=axes[0], scale=scale,\n inner='quartile', bw=bw)\n ax.set_xticklabels(ax.get_xticklabels(), rotation=xrot) # , fontsize = 8\n ax.set_yticklabels(ax.get_yticklabels(), rotation=yrot) # , fontsize = 8\n if (pd.notna(xlab)):\n ax.set_xlabel(xlab)\n if (pd.notna(ylab)):\n ax.set_ylabel(ylab)\n\n # Box Plot\n ax = sns.boxplot(x=x, y=y, data=data, palette=\"RdBu\", ax=axes[1])\n ax.set_xticklabels(ax.get_xticklabels(), rotation=xrot) # , fontsize = 8\n ax.set_yticklabels(ax.get_yticklabels(), rotation=yrot) # , fontsize = 8\n if (pd.notna(xlab)):\n ax.set_xlabel(xlab)\n if (pd.notna(ylab)):\n ax.set_ylabel(ylab)", "def box_plot_continuous(df, cont_feat, percentiles = 0.05, save_plot = False, path_dir = None):\n cont_feat = list(set(cont_feat))\n if len(cont_feat) != 0:\n p_size = min(2, len(cont_feat))\n j = int(len(cont_feat) / 2) + 1\n plt.figure(figsize=(4.5 ** p_size, 4.5 ** p_size))\n for i, col in enumerate(cont_feat):\n if percentiles != None:\n good_data = df[df[col].quantile(percentiles) < df[col]] \n good_data = 
good_data[good_data[col] < good_data[col].quantile(1-percentiles)] \n else :\n good_data = df\n \n plt.subplot(j, p_size, i + 1)\n good_data[~good_data[col].isnull()][[col]].boxplot(fontsize=10*p_size)\n plt.title(str(\"Box plot of \" + col),fontsize=10*p_size)\n plt.xticks(size=8*p_size)\n plt.yticks(size=8*p_size)\n plt.tight_layout()\n plt.show(block=False)\n if save_plot == True:\n plt.savefig((plot_dir + \"Box_plot_continuous_feature .png\"))\n plt.clf()\n else:\n print(\"No Continuous feature to plot\")", "def boxplot(self):\n\n data_for_boxplot=self.data.pivot(index='Country', columns='Region', values='Income')\n # this creates a dataframe with Country index and 6 region columns that contain Income or NaN (the default setting in boxplot automatically drops NaNs)\n \n data_for_boxplot.plot.box(figsize=(10,5))\n plt.title(\"Boxplots of Income Per Capita \\n In \" + str(self.year) +\", by Region\")\n plt.xlabel(\"Regions\")\n plt.ylabel(\"Income per capita\")\n plt.ylim(0, 100000)\n # the upper y-axis limit is set to $100,000 so that it is easier to compare boxplots over time\n plt.savefig('Boxplots for Year ' + str(self.year) +'.pdf')\n pylab.show()", "def add_boxplotlike_data(stats, y_bottom,y_mid,y_top, y_label,method_index,statistic=\"mean_SD\"):\n if statistic==\"median_IQR\":\n x1,x2,x3=tuple(np.quantile(stats, q=[.25, .50, .75]))\n elif statistic==\"mean_SD\":\n sd = np.std(stats)\n x2 = np.mean(stats)\n x1 = x2 - sd\n x3 = x2 + sd\n elif statistic==\"meanall_replicatesd\": # when joining different fitfuns\n\n x2=np.mean(np.array(stats))\n sds=[np.std(stats[i]) for i in range(len(stats))]\n sd=np.mean(sds)\n x1= x2 - sd\n x3 = x2 + sd\n # assumes fitfun is first dimension of stats\n\n else:\n raise Exception(\"statistic %s not known\"%(statistic))\n\n y_bottom[y_label][method_index].append(x1)\n y_mid[y_label][method_index].append(x2)\n y_top[y_label][method_index].append(x3)", "def showBoxplot(data, title):\n sns.set_theme(style=\"whitegrid\")\n df = pd.DataFrame(data=data[:, 0:2], columns=(0, 1))\n df = df.rename(columns={0: 'First principal component', 1: 'Second principal component'})\n fig = plt.figure(figsize=(15, 15))\n fig.suptitle(title, fontsize=18)\n ax1 = fig.add_subplot(221)\n ax1 = sns.boxplot(x=df['First principal component'])\n ax2 = fig.add_subplot(222)\n ax2 = sns.boxplot(y=df['Second principal component'])\n plt.show()", "def custom_boxplot(ax, x, y, error, xlims, ylims, mediancolor='magenta'):\n\n medianprops = {'color': mediancolor, 'linewidth': 2}\n boxprops = {'color': 'black', 'linestyle': '-'}\n whiskerprops = {'color': 'black', 'linestyle': '-'}\n capprops = {'color': 'black', 'linestyle': '-'}\n flierprops = {'color': 'black', 'marker': 'x'}\n\n ax.boxplot(y,\n positions=x,\n medianprops=medianprops,\n boxprops=boxprops,\n whiskerprops=whiskerprops,\n capprops=capprops,\n flierprops=flierprops)\n\n ax.set_xlim(xlims)\n ax.set_ylim(ylims)\n\n return ax", "def drawBoxplot(data, column, yLabel, unit):\n sns.set(style='darkgrid')\n plt.style.use('default')\n plt.style.use('dark_background')\n types = getSpectralTypes()\n colors = getColors()\n sns.set_palette(sns.color_palette(colors))\n fig, ax = plt.subplots(figsize=(14,7))\n sns.boxplot(x='spectral_type', y=column, data=data, order=types)\n plt.ylabel(yLabel + \" \" + unit)\n plt.xlabel(\"Spektraltyp\")\n plt.show()", "def visualization(data):\n\t# preview top 5 row of data\n\tprint(\"\\n--------Data preview--------\\n{0}\"\n\t\t .format(data.head()))\n\tprint(\"\\nNull value status as 
follow:\\n{0}\".format(data.isnull().sum()))\n\tcols = [col for col in data.columns]\n\tprint(\"\\nNumber of original features: {0}\".format(len(cols)))\n\tprint(\"\\nFeatures types:\\n{0}\".format(data[cols].dtypes.value_counts()))\n\n\tcounts = [[], [], []]\n\tfor col in cols:\n\t\t# the data type of each feature\n\t\ttyp = data[col].dtype\n\t\t# the number of differents value in each feature\n\t\tuniq = len(np.unique(data[col]))\n\t\t# constant value feature\n\t\tif uniq == 1:\n\t\t\tcounts[0].append(col)\n\t\t# binary value feature\n\t\telif uniq == 2 and typ == np.int64:\n\t\t\tcounts[1].append(col)\n\t\t# multiple value feature\n\t\telse:\n\t\t\tcounts[2].append(col)\n\n\tprint('\\nConstant features: {}\\nBinary features: {} \\nCategorical features: {}\\n'.format(*[len(c) for c in counts]))\n\tprint('Constant features:', counts[0])\n\tprint('Binary features:', counts[1])\n\tprint('Categorical features:', counts[2])\n\n\tfig, axes = plt.subplots(2,2)\n\tfig.set_size_inches(12, 10)\n\tsn.boxplot(data=data,y=\"count\",orient=\"v\",ax=axes[0][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"season\",orient=\"v\",ax=axes[0][1])\n\tsn.boxplot(data=data,y=\"count\",x=\"hour\",orient=\"v\",ax=axes[1][0])\n\tsn.boxplot(data=data,y=\"count\",x=\"workingday\",orient=\"v\",ax=axes[1][1])\n\n\taxes[0][0].set(ylabel='Count',title=\"Box Plot On Count\")\n\taxes[0][1].set(xlabel='Season', ylabel='Count',title=\"Box Plot On Count Across Season\")\n\taxes[1][0].set(xlabel='Hour Of The Day', ylabel='Count',title=\"Box Plot On Count Across Hour Of The Day\")\n\taxes[1][1].set(xlabel='Working Day', ylabel='Count',title=\"Box Plot On Count Across Working Day\")\n\tplt.show()\n\n\tfig,(ax1,ax2,ax3,ax4)= plt.subplots(nrows=4)\n\tfig.set_size_inches(12,20)\n\tsortOrder = [1,2,3,4,5,6,7,8,9,10,11,12]\n\thueOrder = [\"Sunday\",\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\n\n\tmonthAggregated = pd.DataFrame(data.groupby(\"month\")[\"count\"].mean()).reset_index()\n\tmonthSorted = monthAggregated.sort_values(by=\"count\",ascending=False)\n\tsn.barplot(data=monthSorted,x=\"month\",y=\"count\",ax=ax1,order=sortOrder)\n\tax1.set(xlabel='Month', ylabel='Avearage Count',title=\"Average Count By Month\")\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"season\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"season\"],\n\t data=hourAggregated, join=True,ax=ax2)\n\tax2.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Season\",label='big')\n\n\thourAggregated = pd.DataFrame(data.groupby([\"hour\",\"weekday\"],sort=True)[\"count\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"count\"],hue=hourAggregated[\"weekday\"],hue_order=hueOrder,\n\t data=hourAggregated, join=True,ax=ax3)\n\tax3.set(xlabel='Hour Of The Day', ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across Weekdays\",label='big')\n\n\thourTransformed = pd.melt(data[[\"hour\",\"casual\",\"registered\"]], id_vars=['hour'], value_vars=['casual', 'registered'])\n\thourAggregated = pd.DataFrame(hourTransformed.groupby([\"hour\",\"variable\"],sort=True)[\"value\"].mean()).reset_index()\n\tsn.pointplot(x=hourAggregated[\"hour\"], y=hourAggregated[\"value\"],hue=hourAggregated[\"variable\"],\n\t hue_order=[\"casual\",\"registered\"], data=hourAggregated, join=True,ax=ax4)\n\tax4.set(xlabel='Hour Of The Day', 
ylabel='Users Count',\n\t title=\"Average Users Count By Hour Of The Day Across User Type\",label='big')\n\tplt.show()", "def plot_aggregate(values, label='', smth_wnd=50, plot_mean=False, plot_stdev=False, plot_med=True, plot_iqr=True,\n\t\t\t\t plot_ext=False):\n\tif label != '':\n\t\tlabel += ' '\n\n\tsmoothen = True if 0 < 3 * smth_wnd < values.shape[1] else False\n\n\tx_values = np.arange(1, values.shape[1] + 1)\n\n\tmeans = np.mean(values, axis=0)\n\tif smoothen:\n\t\tmeans = pd.Series(means).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\tif plot_stdev:\n\t\tstd_dev = np.std(values, axis=0)\n\n\t\tif smoothen:\n\t\t\tstd_dev = pd.Series(std_dev).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, means - std_dev, means + std_dev, alpha=0.25, label=label + '1×σ')\n\n\tif plot_mean:\n\t\tplt.plot(x_values, means, '--', label=label + 'Mean')\n\n\tif plot_iqr:\n\t\tiqr_25 = np.percentile(values, 25, axis=0)\n\t\tiqr_75 = np.percentile(values, 75, axis=0)\n\n\t\tif smoothen:\n\t\t\tiqr_25 = pd.Series(iqr_25).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\t\t\tiqr_75 = pd.Series(iqr_75).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, iqr_25, iqr_75, alpha=0.45, label=label + 'IQR')\n\n\tif plot_med:\n\t\tmedians = np.percentile(values, 50, axis=0)\n\n\t\tif smoothen:\n\t\t\tmedians = pd.Series(medians).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.plot(x_values, medians, '--', label=label + 'Median', linewidth=1.5)\n\n\tif plot_ext:\n\t\text_min = np.min(values, axis=0)\n\t\text_max = np.max(values, axis=0)\n\n\t\tif smoothen:\n\t\t\text_min = pd.Series(ext_min).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\t\t\text_max = pd.Series(ext_max).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\tplt.fill_between(x_values, ext_min, ext_max, alpha=0.125, label=label + 'Extremes')", "def box_plots(norm, original):\n bp = plt.boxplot([norm, original], notch=False, patch_artist=True)\n for box in bp['boxes']:\n box.set(color=\"red\")\n box.set(color=\"blue\")\n plt.ylabel(\"coefficient of variation\")\n plt.xlabel(\"Methods\")\n my_xticks = ['RPKM', 'raw counts']\n x = [1, 2]\n plt.xticks(x, my_xticks)\n plt.ylim(0, 400)\n plt.show()", "def plotBox(box):\n plt.plot([box.xll, box.xur, box.xur, box.xll, box.xll]\n ,[box.yll, box.yll, box.yur, box.yur, box.yll]\n , '-'\n )", "def box_plot(\n data,\n save_figure_path=None,\n fontsize=12,\n new_labels=(\"Imposter\", \"Genuine\"),\n figsize=(13, 7),\n):\n palette = {new_labels[0]: \"orange\", new_labels[1]: \"lightblue\"}\n data[\"Tag\"] = data[\"label\"]\n data.loc[data[\"label\"] == 0, \"Tag\"] = new_labels[0]\n data.loc[data[\"label\"] == 1, \"Tag\"] = new_labels[1]\n\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n sns.boxplot(\n x=\"a1\",\n y=\"score\",\n hue=\"Tag\",\n data=data,\n linewidth=1.25,\n dodge=True,\n notch=True,\n palette=palette,\n ax=ax,\n )\n plt.xlabel(\"Subgroup\", fontsize=fontsize)\n plt.ylabel(\"Score\", fontsize=fontsize)\n ax.tick_params(axis=\"both\", labelsize=fontsize)\n plt.legend(loc=\"best\", fontsize=fontsize)\n plt.title(\n \"Score Distribution for Genuine and Imposter Pairs Across Subgroup\",\n fontsize=fontsize,\n )\n\n plt.tight_layout()\n # save figure\n if save_figure_path is not None:\n plt.savefig(save_figure_path, transparent=True)", "def check_outliers(df, col, cat):\n \n if len(cat) == 0:\n boxplot = df.boxplot(column=[col], rot = 90)\n else:\n for c in cat:\n boxplot = df.boxplot(column=[col], by=[c], rot = 90)", "def 
plot_result(d: dict, ylabel: str, title: str, save_path: str):\n # Parse the values\n keys = sorted(d.keys())\n data = np.zeros((len(d[keys[0]]), len(keys)))\n for i, k in enumerate(keys):\n data[:, i] = d[k]\n \n # Create the plot\n plt.figure(figsize=(12, 3.5)) # TODO: Better comparison\n plt.boxplot(data, labels=[str(k) if k % 20 == 0 else '' for k in keys], whis=[0, 100])\n plt.xticks(rotation=90)\n plt.xlabel(\"generations\")\n plt.yticks([i for i in range(11)])\n plt.ylabel(ylabel)\n # plt.ylim(0, max(np.max(data) * 1.05, 1.05))\n plt.ylim(0, 10) # TODO: Fixed to have fair comparison\n plt.grid()\n plt.tight_layout()\n plt.savefig(save_path + \".png\", bbox_inches='tight', pad_inches=0.02, dpi=500)\n plt.savefig(save_path + \".eps\", format='eps', bbox_inches='tight', pad_inches=0.02)\n # plt.show()\n plt.close()", "def combinedPlot(self):\n clf()\n # plot the line\n plot(self.x,self.averages)\n # plot the boxplot\n boxplot(self.y,positions=self.x,widths=0.5)\n xlabel('X Label (units)')\n ylabel('Y Label (units)')\n savefig('combined.png')" ]
[ "0.7449266", "0.72109646", "0.70043445", "0.6898402", "0.68509275", "0.679746", "0.6785153", "0.6746497", "0.6699128", "0.63946426", "0.63684636", "0.6300891", "0.6252812", "0.62450606", "0.61390984", "0.6083131", "0.60641783", "0.606384", "0.601854", "0.5990602", "0.5925783", "0.5851814", "0.583502", "0.582227", "0.5744817", "0.572286", "0.57111317", "0.5708474", "0.57063943", "0.5694" ]
0.7397594
1
Save an errorplot based on the given data. The errorplot displays the average value plus/minus the standard deviation for each point.
def save_error_plot(data, fname="error_plot.pdf", axis_labels=None, plot_title=None, plot_suptitle=None):\n    from math import sqrt\n    figure()\n    transpose = lambda l: [[l[j][i] for j in range(len(l))] for i in range(len(l[0]))]\n    avg = lambda l: sum(l)/len(l)\n    st_dev = lambda (values, mean): sqrt( sum([(v-mean)**2 for v in values])/len(values) )\n    tr_data = transpose(data)\n    avg_data = map(avg, tr_data)\n    stdev_data = map(st_dev, zip(tr_data, avg_data))\n    errorbar(range(len(tr_data)), avg_data, stdev_data)\n    # axis labels\n    if not axis_labels == None:\n        xlabel(axis_labels[0])\n        ylabel(axis_labels[1])\n    if not plot_title is None:\n        title(plot_title)\n    if not plot_suptitle is None:\n        suptitle(plot_suptitle)\n    savefig(fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error():\n\n # Make data set using errors\n dataset_a = DataSet(oscillating,error_y=oscillating_error,plot='error_bar',label='Data and error')\n dataset_a.set_error(interval=5,width=1,cap=2)\n dataset_b = DataSet(oscillating,plot='error_shade',error_y=oscillating_error,order=0,colour='lightgrey',label='Error')\n dataset_c = DataSet(oscillating,plot='line',order=1,colour='firebrick',label='Data')\n\n # Make line graph with error bars\n plot_bar = Plot()\n plot_bar.set_legend(legend=True)\n plot_bar.add_dataset(dataset_a)\n plot_bar.plot()\n plot_bar.save(name='./figures/2d_error_bar',fmt='png')\n plot_bar.display()\n\n # Make line graph with shaded errors\n plot_shade = Plot()\n plot_shade.set_legend(legend=True,location='upper left')\n plot_shade.add_dataset(dataset_b)\n plot_shade.add_dataset(dataset_c)\n plot_shade.plot()\n plot_shade.save(name='./figures/2d_error_shade',fmt='png')\n plot_shade.display()", "def plot_errors(self):\n\n plt.title(\"Prediction Error\")\n plt.plot(self.errors)\n plt.ylabel(\"MSE (Mean Squared Error)\")\n plt.xlabel(\"Iteration\")\n plt.show()", "def plot_ave(results_list):\n x_range = range(len(results_list[0]))\n err_x, err_y, std_list = [], [], []\n\n for i in x_range:\n if i % 10 == 0:\n #get average for each generation\n column = [] \n for result in results_list:\n column.append(result[i])\n average = np.average(column)\n \n std_dev = np.std(column)\n err_x.append(i)\n err_y.append(average)\n std_list.append(std_dev)\n\n pylab.errorbar(err_x, err_y, yerr=std_list)\n pylab.show()", "def errorbar_plot(data, fname=None):\n def gen(dat):\n \"\"\" Generate mean and error terms for given data\n \"\"\"\n y_mean = []\n y_err = []\n for e in dat:\n y_mean.append(np.mean(e))\n y_err.append(np.std(e))\n return y_mean, y_err\n\n if isinstance(data['y_data'][0][0], str):\n for lab, dat in data['y_data']:\n y_mean, y_err = gen(dat)\n plt.errorbar(data['x_data'], y_mean, yerr=y_err, fmt='o', label=lab)\n\n plt.legend(loc='best')\n else:\n y_mean, y_err = gen(data['y_data'])\n plt.errorbar(data['x_data'], y_mean, yerr=y_err, fmt='o')\n\n if 'polyfit' in data['args'] and data['args']['polyfit']:\n coeffs = np.polyfit(data['x_data'], y_mean, 1)\n\n y_vec = np.polyval(coeffs, data['x_data'])\n plt.plot(data['x_data'], y_vec, label='${0:.2}x{2} {1:.2}$'.format(coeffs[0], abs(coeffs[1]), '+' if coeffs[1] > 0 else '-'))\n\n plt.legend(loc='best')\n\n plt.title(data['title'])\n plt.xlabel(data['x_label'])\n plt.ylabel(data['y_label'])\n\n Plotter.show(data['title'], fname=fname)", "def plot_errors(dat, title='Data', avg='mean', err='sem'):\n\n n_groups = len(dat)\n\n fig = plt.figure(figsize=[4, 5])\n ax = plt.gca()\n\n if avg == 'mean': avg_func = np.nanmean\n if avg == 'median': avg_func = np.nanmedian\n\n if err == 'sem': err_func = sem\n\n plt.errorbar(np.arange(1, n_groups+1), avg_func(dat, 1), yerr=err_func(dat, 1), xerr=None, fmt='.',\n markersize=22, capsize=10, elinewidth=2, capthick=2)\n\n ax.set_xlim([0.5, n_groups+0.5])\n\n # Titles & Labels\n ax.set_title(title, fontsize=16)\n ax.set_xlabel('Noise Levels')\n ax.set_ylabel('Error')\n\n # Set the top and right side frame & ticks off\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n # Set linewidth of remaining spines\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)", "def plot_mean_std(data,ax,label=None,show_error=True):\n x = np.arange(1,100)\n mean = 
np.array([np.mean(data_n) for data_n in data])\n if show_error: std = np.array([np.std(data_n) for data_n in data])\n ax.plot(x,mean,label=label)\n if show_error: ax.fill_between(x,mean-std,mean+std,alpha=0.3)", "def _plot_errors(self):\n for task_id, loss_type in self.task_ids.iteritems():\n x = np.arange(len(self.training_errors[task_id]))\n fig, ax = plt.subplots(1, 1)\n ax.set_xlabel('Number of epochs of training')\n if loss_type is LossTypes.mse:\n ax.set_ylabel('RMSE Error')\n elif loss_type is LossTypes.cross_entropy:\n ax.set_xlabel('(1 - accuracy)')\n plt.plot(x, self.training_errors[task_id], 'r', label='training')\n plt.plot(x, self.validation_errors[task_id], 'b', label='validation')\n plt.legend(loc=\"best\", framealpha=0.3)\n fig.savefig(\"error-curve-task-{}.png\".format(task_id))\n plt.close('all')", "def plot_train_test_errors(train_errors, test_errors, lambda_str , K , path, rng):\n plt.plot(range(rng), train_errors, marker='o', label='Training Data');\n plt.plot(range(rng), test_errors, marker='v', label='Test Data');\n plt.title('ALS-WR Learning Curve, lambda = %s, K = %d'%(lambda_str, K))\n plt.xlabel('Number of Epochs');\n plt.ylabel('RMSE');\n plt.legend()\n plt.grid()\n plt.savefig(\"../results/test_train_rmse_\"+path)\n plt.show()", "def plotErr(self):\n if self.xp and self.wp:\n # plot the spectra\n w=self.ws.value(np.array(self.xp))\n self.errcurve,=self.erraxes.plot(self.xp,self.wp-w,linewidth=0.5,linestyle='',marker='o',color='b')\n if self.dxp and self.dwp:\n # plot the spectra\n dw=self.ws.value(np.array(self.dxp))\n self.delerrcurve,=self.erraxes.plot(self.dxp,self.dwp-dw,linewidth=0.5,linestyle='',marker='x',color='b')", "def error_plot(training_costs, test_costs, learning_rate, accuracy, test_accuracy, val_accuracy, layers, data_size,\n n_neighbours, dropout_rate):\n\n plt.plot(training_costs, label=\"Training loss\")\n plt.plot(test_costs, label=\"Test loss\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Cost function (%)\", size='medium')\n plt.suptitle(\"Cost function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n plt.figtext(0.77, 0.35, \"Training accuracy\\n{0:.2f}%\".format(accuracy), size='medium')\n plt.figtext(0.77, 0.25, \"Test accuracy\\n{0:.2f}%\".format(test_accuracy), size='medium')\n plt.figtext(0.77, 0.15, \"Validation accuracy\\n{0:.2f}%\".format(val_accuracy), size='medium')\n if n_neighbours == 0:\n plt.figtext(0.77, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.77, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.77, 0.70, \"{}\\nsamples\".format(data_size))\n plt.legend(loc='right', bbox_to_anchor=(1.39, 0.5))\n plt.subplots_adjust(right=0.75)\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/error_plots/{}_error_{}\".format(n_neighbours, data_size))", "def plot_loss(training_errors, validation_errors):\n plt.xscale('Log')\n plt.xlabel('Epochs')\n plt.ylabel('Mean Actual Error')\n plt.plot(training_errors, label = \"Training Error\", \\\n color = 'blue')\n plt.plot(validation_errors, label = \"Validation Error\", \\\n color = 'red')\n plt.legend()\n # Saves plot automatically, adjust filename as needed.\n plt.savefig('reservoir_05whdens_100h_7spec_test_3.png')\n plt.show()", "def plot_data(self, plot_input=True, plot_fitted=True,plotfile=None, 
show=None):\n if not self.fitted:\n raise RuntimeError(\"Please run fit() before attempting to plot the results\")\n\n fitted_data = self.data_summary(printout=False)\n fitted_mean = fitted_data['mean'].to_numpy().reshape((self.npoints,self.ndim))\n print(fitted_mean.shape)\n fitted_sigma = fitted_data['sd'].to_numpy().reshape((self.npoints,self.ndim))\n if self.ndim==np.int(2) and isinstance(self.ndim, int):\n blue, _, red, *_ = sns.color_palette()\n f, ax = plt.subplots(1, 1, figsize=(5, 4))#, gridspec_kw=dict(width_ratios=[4, 3]))\n\n sns.scatterplot(x=self.data[:,0], y=self.data[:,1])\n if plot_input:\n ax.errorbar(x=self.data[:,0], y=self.data[:,1],\n xerr=self.sigma[:,0], yerr=self.sigma[:,1],fmt='o',label='input data')\n \n if plot_fitted:\n ax.errorbar(x=fitted_mean[:,0], y=fitted_mean[:,1],\n xerr=fitted_sigma[:,0], yerr=fitted_sigma[:,1],fmt='o',label='inferred data')\n \n mu_post = self.trace.posterior[\"mu\"].mean(axis=(0, 1)).data\n \n sigma_post = self.trace.posterior[\"cov\"].mean(axis=(0, 1)).data\n \n var_post, U_post = np.linalg.eig(sigma_post)\n angle_post = 180.0 / np.pi * np.arccos(np.abs(U_post[0, 0]))\n\n e_post = Ellipse(\n mu_post,\n 2 * np.sqrt(5.991 * var_post[0]),\n 2 * np.sqrt(5.991 * var_post[1]),\n angle=angle_post,\n )\n e_post.set_alpha(0.5)\n e_post.set_facecolor(blue)\n e_post.set_zorder(10)\n ax.add_artist(e_post)\n rect_post = plt.Rectangle((0, 0), 1, 1, fc=blue, alpha=0.5)\n ax.legend(\n [rect_post],\n [\"Estimated 95% density region\"],\n loc=2,\n )\n #plt.show()\n\n elif self.ndim > 2 and isinstance(int, self.ndim) and np.isfinite(self.ndim):\n #raise NotImplementedError(\"This routine doesn't support plotting correlations in more than 2 dimensions yet!\")\n rows = self.ndim - 1\n cols = self.ndim - 1\n fig = plt.figure()\n gs = fig.add_gridSpec(rows, cols,left=0.1, right=0.9, bottom=0.1, top=0.9,\n wspace=0.05, hspace=0.05)\n for i in range(self.ndim - 1):\n for j in range(i+1,self.ndim - 1):\n ax = fig.add_subplot(gs[i,j])\n #plot the data points\n sns.scatterplot(self.data[:,i], self.data[:,j], ax=ax)\n if plot_input:\n ax.errorbar(x=self.data[:,i], y=self.data[:,j],\n xerr=self.sigma[:,i], yerr=self.sigma[:,j])\n \n if plot_fitted:\n ax.errorbar(x=fitted_mean[:,i], y=fitted_mean[:,j],\n xerr=fitted_sigma[:,i], yerr=fitted_sigma[:,j])\n \n mu_post = self.trace.posterior[\"mu\"].mean(axis=(i, j)).data\n \n sigma_post = self.trace.posterior[\"cov\"].mean(axis=(i, j)).data\n \n var_post, U_post = np.linalg.eig(sigma_post)\n angle_post = 180.0 / np.pi * np.arccos(np.abs(U_post[0, 0]))\n \n e_post = Ellipse(\n mu_post,\n 2 * np.sqrt(5.991 * var_post[0]),\n 2 * np.sqrt(5.991 * var_post[1]),\n angle=angle_post,\n )\n e_post.set_alpha(0.5)\n e_post.set_facecolor(blue)\n e_post.set_zorder(10)\n ax.add_artist(e_post)\n \n else:\n raise ValueError(\"Ndim is either less than 2 or is not an integer!\")\n \n if isinstance(plotfile, str):\n plt.save(plotfile)\n elif not show:\n raise TypeError(\"plotfile must be a string\")\n if show:\n plt.show()\n elif plotfile is not None:\n plt.close()", "def generation_plot(file, errorbars=True):\r\n import pylab\r\n import matplotlib.font_manager \r\n \r\n generation = []\r\n psize = []\r\n worst = []\r\n best = []\r\n median = []\r\n average = []\r\n stdev = []\r\n reader = csv.reader(file)\r\n for row in reader:\r\n generation.append(int(row[0]))\r\n psize.append(int(row[1]))\r\n worst.append(float(row[2]))\r\n best.append(float(row[3]))\r\n median.append(float(row[4]))\r\n average.append(float(row[5]))\r\n 
stdev.append(float(row[6]))\r\n stderr = [s / math.sqrt(p) for s, p in zip(stdev, psize)]\r\n \r\n data = [average, median, best, worst]\r\n colors = ['black', 'blue', 'green', 'red']\r\n labels = ['average', 'median', 'best', 'worst']\r\n figure = pylab.figure()\r\n if errorbars:\r\n pylab.errorbar(generation, average, stderr, color=colors[0], label=labels[0])\r\n else:\r\n pylab.plot(generation, average, color=colors[0], label=labels[0])\r\n for d, col, lab in zip(data[1:], colors[1:], labels[1:]):\r\n pylab.plot(generation, d, color=col, label=lab)\r\n pylab.fill_between(generation, data[2], data[3], color='#e6f2e6')\r\n pylab.grid(True)\r\n ymin = min([min(d) for d in data])\r\n ymax = max([max(d) for d in data])\r\n yrange = ymax - ymin\r\n pylab.ylim((ymin - 0.1*yrange, ymax + 0.1*yrange)) \r\n prop = matplotlib.font_manager.FontProperties(size=8) \r\n pylab.legend(loc='upper left', prop=prop) \r\n pylab.xlabel('Generation')\r\n pylab.ylabel('Fitness')\r\n pylab.show()", "def plot_error_distribution(records, predictions, fig_savepath, figsize=(3.54, 2.0), format='PNG', dpi=300):\n plt.figure(figsize=figsize)\n error = predictions - records\n # plt.hist(error,bins=25)\n # plt.hist(error, 50, density=True,log=True, facecolor='g', alpha=0.75)\n plt.hist(error, 20, density=True, log=True, )\n plt.xlabel('Prediction Error')\n plt.ylabel('count')\n plt.tight_layout()\n plt.savefig(fig_savepath, format=format, dpi=dpi)\n # plt.show()", "def plot_model_error(x_train, x_test, y_train, y_test, model, \\\n save_fig_fname, label):\n # Assertions\n assert isinstance(x_train, np.ndarray)\n assert isinstance(x_test, np.ndarray)\n assert isinstance(y_train, np.ndarray)\n assert isinstance(y_test, np.ndarray)\n assert isinstance(save_fig_fname, (str, type(None))), \\\n 'Wrong Type: desired output file name must be a string'\n if type(save_fig_fname) is str:\n assert save_fig_fname.endswith('.png'),\\\n 'output_file string must include the .png extension'\n\n # Functionality\n y_train = y_train.reshape(-1,1)\n y_test = y_test.reshape(-1,1)\n plt.figure(figsize=(8, 10), dpi=100)\n plt.subplot(211)\n plt.scatter(y_train, model.predict(x_train), color='r', label='train')\n plt.scatter(y_test, model.predict(x_test), color='blue', label='test')\n plt.xlabel('Actual %s' % label, fontsize=12)\n plt.ylabel('Predicted %s' %label, fontsize=12)\n plt.legend(loc='upper center')\n\n plt.subplot(212)\n plt.scatter(y_train, model.predict(x_train)-y_train, color = 'r', \\\n label = 'train', marker= 'x')\n plt.scatter(y_test, model.predict(x_test)-y_test, color = 'blue', \\\n label = 'test', marker = 'x')\n plt.axhline(0, ls='--')\n plt.xlabel('Actual %s' % label, fontsize=12)\n plt.ylabel('Prediction error %s' % label, fontsize=12)\n plt.legend(loc='upper center')\n\n if save_fig_fname is not None:\n plt.savefig(save_fig_fname)\n else:\n plt.show()", "def plot_errors(loss_train, loss_val, jet):\n plt.plot(list(range(len(loss_train))), loss_train, 'g', label='Training loss')\n plt.plot(list(range(len(loss_val))), loss_val, 'b', label='Validation loss')\n plt.title('Training and Validation loss for jet: {jet}'.format(jet=jet))\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()", "def train_test_error(e_train, e_test, model_params):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.plot(model_params, e_train, label='Training Set')\n plt.plot(model_params, e_train, label='Test Set')\n plt.xlabel('Model Parameter')\n plt.ylabel('MSE of model')\n plt.legend()\n\n return fig", "def 
update_error_plot_values(self):\n check_is_dir(\"mp_plotdata\")\n check_is_dir(\"mp_plotdata/\"+self.name)\n\n data_train = self.get_data.evaluate_on_train_data(n_samples=50)\n data_validation = self.get_data.evaluate_on_validation_data(n_samples=50)\n\n for data, _set in zip([data_train, data_validation], [\"train\", \"validation\"]):\n object_real_samples = data[\"object_real_samples\"].reshape(\n -1,self.get_data.N, self.get_data.N, 1)\n object_imag_samples = data[\"object_imag_samples\"].reshape(\n -1,self.get_data.N, self.get_data.N, 1)\n diffraction_samples = data[\"diffraction_samples\"].reshape(\n -1,self.get_data.N, self.get_data.N, 1)\n\n filename = \"mp_plotdata/\"+self.name+\"/\"+_set+\"_log.dat\"\n if not os.path.exists(filename):\n with open(filename, \"w\") as file:\n file.write(\"# time[s] epoch real_loss imag_loss reconstruction_loss\\n\")\n\n # real loss\n real_loss = self.sess.run(self.nn_nodes[\"real_loss\"],\n feed_dict={self.x:diffraction_samples,\n self.real_actual:object_real_samples})\n\n # imaginary loss\n imag_loss = self.sess.run(self.nn_nodes[\"imag_loss\"],\n feed_dict={self.x:diffraction_samples,\n self.imag_actual:object_imag_samples})\n\n # reconstruction\n reconstruction_loss = self.sess.run(self.nn_nodes[\"reconstruction_loss\"],\n feed_dict={self.x:diffraction_samples})\n\n datastring = \"\"\n datastring+= str(time.time())\n datastring+= \" \"\n datastring+= str(self.epoch)\n datastring+= \" \"\n datastring+= str(real_loss)\n datastring+= \" \"\n datastring+= str(imag_loss)\n datastring+= \" \"\n datastring+= str(reconstruction_loss)\n datastring+= \"\\n\"\n\n with open(filename, \"a\") as file:\n file.write(datastring)\n\n\n for key in self.experimental_traces.keys():\n trace = self.experimental_traces[key]\n\n # reconstruction\n reconstruction_loss = self.sess.run(self.nn_nodes[\"reconstruction_loss\"],\n feed_dict={self.x:trace})\n\n logger_name = key+\"_reconstructed\"\n filename = \"mp_plotdata/\"+self.name+\"/\"+logger_name+\".dat\"\n if not os.path.exists(filename):\n with open(filename, \"w\") as file:\n file.write(\"# time[s] epoch reconstruction_loss\\n\")\n\n datastring = \"\"\n datastring+= str(time.time())\n datastring+= \" \"\n datastring+= str(self.epoch)\n datastring+= \" \"\n datastring+= str(reconstruction_loss)\n datastring+= \"\\n\"\n with open(filename, \"a\") as file:\n file.write(datastring)", "def plot_error(self, maxstep=20):\n plt.ion()\n plt.xlabel(\"step\")\n plt.ylabel(\"Ave Logloss (bits)\")\n train_errors = []\n if self.dataset.test:\n test_errors = []\n for i in range(maxstep):\n self.learn(1)\n train_errors.append( sum(self.logloss(tple) for tple in self.dataset.train)\n /len(self.dataset.train))\n if self.dataset.test:\n test_errors.append( sum(self.logloss(tple) for tple in self.dataset.test)\n /len(self.dataset.test))\n plt.plot(range(1,maxstep+1),train_errors,\n label=str(self.num_classes)+\" classes. Training set\")\n if self.dataset.test:\n plt.plot(range(1,maxstep+1),test_errors,\n label=str(self.num_classes)+\" classes. 
Test set\")\n plt.legend()\n plt.draw()", "def plotErrors(losses, model_title ='Shallow Network, SGD, Batch Size = 10'):\n fig, axes = plt.subplots()\n\n x = np.arange(len(losses))\n\n axes.plot(x, losses)\n axes.set_ylabel(\"Loss (cross entropy)\")\n axes.set_xlabel(\"Number of iterations\")\n axes.set_title(model_title) \n\n plt.show() \n\n return None", "def reconstruction_errors(identifier, train_errors, vali_errors,\n generated_errors, random_errors):\n print(identifier)\n fig, axarr = plt.subplots(4, 1, sharex=True, figsize=(4, 8))\n axarr[0].hist(train_errors, normed=1, color='green', bins=50)\n axarr[0].set_title(\"train reconstruction errors\")\n axarr[1].hist(vali_errors, normed=1, color='blue', bins=50)\n axarr[1].set_title('vali reconstruction errors')\n axarr[2].hist(generated_errors, normed=1, color='pink', bins=50)\n axarr[2].set_title('generated reconstruction errors')\n axarr[3].hist(random_errors, normed=1, color='grey', bins=50)\n axarr[3].set_title('random reconstruction errors')\n for ax in axarr:\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.tick_params(bottom='off', left='off')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n axarr[3].set_xlim(0, 0.05)\n plt.tight_layout()\n plt.savefig('./experiments/plots/' + identifier + '_reconstruction_errors.png')\n return True", "def plot_error(k_vals, error):\n\n plt.plot(k_vals,error)\n plt.xlabel('k-value')\n plt.ylabel('Cost')\n plt.show()", "def plot(self) -> Figure:\n ax = self.setup_plot()\n ax.errorbar(\n self.doses,\n self.means,\n yerr=self.errorbars(),\n label=\"Mean ± 95% CI\",\n **plotting.DATASET_POINT_FORMAT,\n )\n ax.legend(**plotting.LEGEND_OPTS)\n return ax.get_figure()", "def plot_lastreviews_means_and_errors(H_in_HL_mean, H_in_HL_error, L_in_HL_mean, L_in_HL_error,\n H_in_HH_mean, H_in_HH_error, H_in_HM_mean, H_in_HM_error,\n M_in_HM_mean, M_in_HM_error):\n # plot the result in a nice plot\n plt.figure(figsize=(12, 9)) \n\n # create the fig. 
and axes.\n ax = plt.subplot(111)\n ax.spines[\"top\"].set_visible(False) \n ax.spines[\"right\"].set_visible(False)\n\n # define the color to use\n color_1 = rgb_to_matplot_lib(strong_green)\n color_2 = rgb_to_matplot_lib(light_green)\n color_3 = rgb_to_matplot_lib(strong_red)\n color_4 = rgb_to_matplot_lib(light_green)\n color_5 = rgb_to_matplot_lib(orange)\n\n\n\n # axis \n ax.set_ylabel('Rating', fontsize = 14)\n ax.tick_params(axis='both', labelsize=14)\n\n # plot small dash lines to follow the grading \n for y in np.arange(4.0, 4.6, 0.1): \n ax.plot(range(0, 45), [y] * len(range(0, 45)), \"--\", lw=0.5, color=\"black\", alpha=0.3)\n\n\n # set titles\n ax.set_title('10+ reviews average rating for each case in each group', fontsize = 14)\n\n plt.ylim([1,5.1])\n plt.xlim([0,5.1])\n\n plt.errorbar(1, H_in_HH_mean, H_in_HH_error, lineStyle= None, capsize=5, marker=\"^\", color=color_1)\n plt.errorbar(2, H_in_HL_mean, H_in_HL_error, lineStyle= None, capsize=5, marker=\"^\", color=color_2)\n plt.errorbar(3, L_in_HL_mean, L_in_HL_error, lineStyle= None, capsize=5, marker=\"^\", color=color_3)\n plt.errorbar(4, H_in_HM_mean, H_in_HM_error, lineStyle= None, capsize=5, marker=\"^\", color=color_4)\n plt.errorbar(5, M_in_HM_mean, M_in_HM_error, lineStyle= None, capsize=5, marker=\"^\", color=color_5)\n\n plt.text(0.8, 4.01, \"({:04.3f})\".format(H_in_HH_mean), fontsize=14, color=color_1)\n plt.text(1.8, 4.01, \"({:04.3f})\".format(H_in_HL_mean), fontsize=14, color=color_2) \n plt.text(2.8, 4.01, \"({:04.3f})\".format(L_in_HL_mean), fontsize=14, color=color_3) \n plt.text(3.8, 4.01, \"({:04.3f})\".format(H_in_HM_mean), fontsize=14, color=color_4) \n plt.text(4.8, 4.01, \"({:04.3f})\".format(M_in_HM_mean), fontsize=14, color=color_5) \n\n\n # set ticks label\n ax.set_xticks(range(1,6))\n ax.set_xticklabels(('H in HH', 'H in HL', 'L in HL', 'H in HM', 'M in HM'))\n\n #set ticks color\n colors = [color_1, color_2, color_3, color_4, color_5]\n for xtick, color in zip(ax.get_xticklabels(), colors):\n xtick.set_color(color)\n\n plt.ylim([4,4.6])\n plt.xlim([0.5,5.5])\n plt.show()", "def _single_prediction_error_plot(self, scatter_data, color):\n # figure\n p = default_figure(\n {\n \"tools\": \"pan,wheel_zoom,box_zoom,reset\",\n \"toolbar_location\": \"right\"\n }\n )\n\n # scatter\n p.scatter(scatter_data[0], scatter_data[1], color=color, size=16, fill_alpha=0.8)\n\n # baseline of x=y\n slope = Slope(\n gradient=1,\n y_intercept=0,\n line_width=1,\n line_color=self.plot_design.models_dummy_color,\n line_dash=\"dashed\"\n )\n p.add_layout(slope)\n\n # plot specific styling\n p.xaxis.axis_label = \"Actual\"\n p.yaxis.axis_label = \"Predicted\"\n\n formatter = FuncTickFormatter(code=self._formatter_code) # negative numbers are having a wacky formatting\n # formatters must be created independently, cannot be reused between plots\n p.xaxis.formatter = formatter\n p.yaxis.formatter = formatter\n\n p.toolbar.autohide = True\n\n return p", "def plot_errors_violin(dat, title=None, x_axis='nlvs', y_label=None, save_fig=False, save_name=None):\n\n fig = plt.figure(figsize=[8, 6])\n\n ax = sns.violinplot(data=dat.T, cut=0, scale='area', color='#2a60b7', saturation=0.75)#, bw=2)\n\n # X-ticks & label for noise levels\n if x_axis == 'nlvs':\n plt.xticks([0, 1, 2, 3, 4],\n [0.00, 0.025, 0.050, 0.100, 0.150]);\n ax.set_xlabel('Noise Levels', fontsize=14)\n # X-ticks & label for number of peaks\n if x_axis == 'n_oscs':\n plt.xticks([0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4]);\n ax.set_xlabel('Number of Peaks', 
fontsize=14)\n\n # Titles & Labels\n if title:\n ax.set_title(title, fontsize=16)\n if not y_label:\n y_label = 'Error'\n ax.set_ylabel(y_label, fontsize=14)\n\n # Set the top and right side frame & ticks off\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n # Set linewidth of remaining spines\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)\n\n if save_fig:\n\n save_name = 'plts/' + save_name + '_syn_error.pdf'\n plt.savefig(save_name, bbox_inches='tight', dpi=300)", "def plot_SS(H,E,C,nframes,nres,outdir,name_mod):\n\tH_av = np.zeros(nres) ; H_err = np.zeros(nres) \n\tE_av = np.zeros(nres) ; E_err = np.zeros(nres) \n\tC_av = np.zeros(nres) ; C_err = np.zeros(nres) \n\tfor ind in range(nres):\n\t\tH_av[ind] = sum(H[ind,:])/nframes ; H_err[ind] = np.std(H[ind,:])\n\t\tE_av[ind] = sum(E[ind,:])/nframes ; E_err[ind] = np.std(E[ind,:])\n\t\tC_av[ind] = sum(C[ind,:])/nframes ; C_err[ind] = np.std(C[ind,:])\n\n\tplt.clf()\n\tplt.plot(H_av,label='Helix',c='r') ; plt.errorbar(range(nres), H_av,yerr=H_err/(np.sqrt(nframes)-1), fmt='o',color='r')\n\tplt.plot(E_av,label='Beta Sheet',c='b') ; plt.errorbar(range(nres), E_av,yerr=E_err/(np.sqrt(nframes)-1), fmt='o',color='b')\n\tplt.plot(C_av,label='Coil',c='k') ; plt.errorbar(range(nres), C_av,yerr=C_err/(np.sqrt(nframes)-1), fmt='o',color='k')\n\tplt.legend(loc=1)\n\tplt.savefig(outdir+\"SS\" + name_mod + \".png\")\n\tnp.savetxt(outdir+\"SS_H_av\" + name_mod + \".npy\",[H_av,H_err])\n\tnp.savetxt(outdir+\"SS_E_av\" + name_mod + \".npy\",[E_av,E_err])\n\tnp.savetxt(outdir+\"SS_C_av\" + name_mod + \".npy\",[C_av,C_err])\n\treturn None", "def graph_error_over_generation(self, with_scatter=False):\n mean_errors = []\n best_individual_errors = []\n\n for i in range(len(self.generations)):\n best_individual_errors.append(\n self.get_low_fitness_individual(i).fitness)\n mean_errors.append(\n np.mean([j.fitness for j in self.generations[i]]))\n\n plt.figure(figsize=(10, 5))\n ax = plt.subplot()\n if with_scatter:\n self.plot_error_scatter()\n mean_error_line, = plt.plot(\n range(len(self.generations)),\n mean_errors,\n label='Mean Error of Individuals',\n color='b')\n best_individual_error_line, = plt.plot(\n range(len(self.generations)),\n best_individual_errors,\n label='Lowest Error of an Individual',\n color='green')\n plt.xticks(\n [i for i in range(0, self.config.max_generations, 5)],\n [i for i in range(0, self.config.max_generations, 5)])\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Error', **hfont)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n\n plt.legend(\n handles=[mean_error_line, best_individual_error_line],\n loc='upper right',\n bbox_to_anchor=(1, 1.1))\n plt.savefig('figures/Parameter Tuning Figure/error_over_generation.svg')", "def classificationError():\n print('Computing Classification Test Error')\n\n dl1, dl2 = CIFAR10TestLoader() \n err1, err2 = [], []\n\n # load every model \n with open('classification_error.csv','w') as clf:\n writer = csv.writer(clf)\n writer.writerow(['Classification Error Dataset 1','Classification Error Dataset 2'])\n\n for i in tqdm(range(5,95,2)):\n mtl = MTL()\n PATH = os.getcwd() + '/mtl_model_{d1}_vs_{d2}.pth'.format(d1=i, d2=100-i)\n mtl.load_state_dict(state_dict=torch.load(PATH))\n\n e1, e2 = accuracy(dl1, mtl), accuracy(dl2, mtl)\n err1.append(e1)\n err2.append(e2)\n writer.writerow([e1,e2])\n \n 
fig, ax_left = plt.subplots()\n ax_right = ax_left.twinx()\n ax_left.plot([0.05 + 0.02* i for i in range(45)], err1, color='black')\n ax_right.plot([0.05 + 0.02* i for i in range(45)], err2, color='red')\n ax_right.set_ylabel('Dataset 2 (red) accuracy')\n ax_left.set_ylabel('Dataset 1 (black) accuracy')\n ax_left.set_xlabel('Percentage of points from dataset 1 in training set')\n plt.title('Classification Accuracy over both datasets')\n plt.savefig('classification_error_plots.pdf')", "def plot_model_error(self, var, obslabels=None):\n\n fig = plt.figure()\n gs = gridspec.GridSpec(1, 2, wspace=0.05, hspace=0.05, bottom=0.2, width_ratios=[3, 1])\n ax = fig.add_subplot(gs[0])\n\n # 1 vs. 2\n self._draw_error_scatter(1, 2, var, color='red', marker='o', ax=ax, obslabels=obslabels)\n\n # 1 vs. 3\n self._draw_error_scatter(1, 3, var, color='green', marker='*', ax=ax, obslabels=obslabels)\n\n # 1 vs. 4\n self._draw_error_scatter(1, 4, var, color='blue', marker='^', ax=ax, obslabels=obslabels)\n\n # 2 vs. 3\n self._draw_error_scatter(2, 3, var, color='grey', marker='x', ax=ax, obslabels=obslabels)\n\n # 2 vs 4\n self._draw_error_scatter(2, 4, var, color='m', marker='+', ax=ax, obslabels=obslabels)\n\n # 3 vs 4\n self._draw_error_scatter(3, 4, var, color='c', marker='h', ax=ax, obslabels=obslabels)\n\n if ax is not None:\n ax.legend(prop={'size': 8}, ncol=1, fancybox=True, loc='upper left')\n ax.set_xlabel('$\\epsilon$ (observation X)')\n ax.set_ylabel('$\\epsilon$ (observation Y)')\n\n xmi, xma = ax.get_xlim()\n ymi, yma = ax.get_ylim()\n\n ax.set_ylim(min(xmi, ymi), max(xma, yma))\n ax.set_xlim(min(xmi, ymi), max(xma, yma))\n ax.grid()\n ax.set_title('Comparison of model errors: ' + var.upper())\n ax.plot(ax.get_xlim(), ax.get_xlim(), 'k--') # 1:1 line\n return fig" ]
[ "0.7467149", "0.71385247", "0.6977244", "0.68285197", "0.6820716", "0.6669263", "0.6642838", "0.6629824", "0.65923953", "0.6511317", "0.65044206", "0.63321745", "0.6321649", "0.62116826", "0.6172719", "0.6158829", "0.6145462", "0.61333287", "0.6120439", "0.6106668", "0.610414", "0.6057554", "0.6038864", "0.6020776", "0.6017311", "0.59903675", "0.59701616", "0.59255904", "0.59151214", "0.59130836" ]
0.78207517
0
Generate an MPG video showing multiple scatter plots.
def anim_scatter_plot(points_list, values, fname="anim_scatter.mpg", fps=2, *args, **kwargs):\n    print "Genrating temp images"\n    for idx, pts in enumerate(points_list):\n        print "\tPlot %i of %i" % (idx, len(points_list))\n        scatter_plot(pts, values, "_tmp_%i.png" % idx, *args, **kwargs)\n    print "Creating animation"\n    os.system("mencoder 'mf://_tmp_*.png' -mf type=png:fps=%i -ovc\ lavc -lavcopts vcodec=wmv2 -oac copy -o %s" % (fps, fname))\n    print "Removing temp files"\n    os.system("rm -f _tmp_*.png")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_video(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for f in all_obj_locs[::STEP]:\n plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n plt.ylim([-LANE_LENGTH / 4 + 25, LANE_LENGTH / 4 + 75])\n plt.xlim([-50, LANE_LENGTH + 50])\n x_s = [p[1] for p in f]\n y_s = [p[0] for p in f]\n s = 10\n plt.plot([0, 0], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([LANE_LENGTH + MARGIN, LANE_LENGTH + MARGIN], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [0 - MARGIN, 0 - MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [LANE_WIDTH + MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.scatter(x_s, y_s, s=s)\n x_s_pins = init_pins()[:, 0]\n y_s_pins = init_pins()[:, 1]\n plt.scatter(y_s_pins, x_s_pins, s=3, color=\"black\")\n plt.savefig(\"data/frame\" + str(i) + \".png\")\n plt.close()\n # plt.show()\n i += 1\n create_video_from_frames(len(all_obj_locs[::STEP]), fps / STEP / DT)", "def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + '/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')", "def make_video(org_arr, pred_arr, file_name):\n fig = plt.figure()\n ax1 = fig.add_subplot(1,2,1)\n ax2 = fig.add_subplot(1,2,2)\n ax1.set_title('Actual')\n ax2.set_title('Predicted')\n\n im_list = []\n org_length = org_arr.shape[0]\n total_length = pred_arr.shape[0]\n \n for t in range(total_length):\n title = fig.text(0.5, 0.85, \"t = \" + str(t + 1), fontsize = \"large\")\n\n if t < org_length:\n im1 = ax1.imshow(org_arr[t])\n else:\n im1 = ax1.imshow(np.zeros(org_arr.shape[1:]))\n \n im2 = ax2.imshow(pred_arr[t])\n im_list.append([im1, im2, title])\n \n ani = animation.ArtistAnimation(fig, im_list, interval=500)\n\n ani.save(file_name) \n plt.close(fig)", "def makeVideo(self):\n \n #from morphforge.morphology.util import TriMeshBuilderVerySimple\n import sys\n sys.path.append('/usr/share/pyshared/')\n \n #import morphforge\n from morphforge.morphology.mesh import MeshBuilderRings\n MonkeyPatchMayaVi()\n #import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n assert len(self.morphs)==1\n mesh = MeshBuilderRings().build(self.morphs[0])\n \n \n #mlab.options.offscreen = True\n \n \n @mlab.show\n @mlab.animate(delay=100 )#, ui=False) #(delay=500, ui=False)\n def _showSimpleCylinders():\n \n f = mlab.figure( bgcolor=None, fgcolor=None, engine=None, size=(1024, 768))\n #f = 
mlab.gcf() \n #c = TriMeshBuilderVerySimple(self.morphs[0])\n #mlab.triangular_mesh(c.x, c.y, c.z, c.triangles, colormap=self.colormap)\n mlab.triangular_mesh(mesh.vertices[:,0], mesh.vertices[:,1], mesh.vertices[:,2], mesh.triangles, colormap=self.colormap)\n \n for i in itertools.count():\n print i\n f.scene.camera.azimuth(0.1)\n mlab.savefig('/home/michael/Desktop/out/O%04d.png'%i)#, size=(1024,768))\n f.scene.render()\n if i> 3600:\n break\n yield\n \n _showSimpleCylinders()", "def create_video(video):\n fig, ax = plt.subplots()\n plt.close()\n def animator(N): # N is the animation frame number\n ax.imshow(video[N])\n return ax\n PlotFrames = range(0,video.shape[0],1)\n anim = animation.FuncAnimation(fig,animator,frames=PlotFrames,interval=100)\n rc('animation', html='jshtml')\n return anim", "def movie(**kwargs):\n\n print(\"let's make a movie!\")\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n celldata = gal_ob.cell_data.get_dataframe()\n\n # Set up grid\n known_points = np.array([celldata.x.values, celldata.y.values, celldata.z.values]).T\n values = celldata[p.prop].values\n values[values == 0] = 1e-6\n values = np.log10(values)\n X, Y, Z = np.meshgrid(np.arange(-gal_ob.radius,gal_ob.radius), np.arange(-gal_ob.radius,gal_ob.radius), np.arange(-gal_ob.radius,gal_ob.radius))\n\n grid = griddata(known_points, values, (X, Y, Z))\n\n # MAKE A FIGURE WITH MAYAVI\n\n duration = 1 # duration of the animation in seconds (it will loop)\n\n print('Now setting up figure')\n\n fig = mlab.figure(size=(200, 200), bgcolor=(1,1,1))\n mlab.contour3d(grid, contours=10, transparent=True, figure=fig)\n\n # ANIMATE THE FIGURE WITH MOVIEPY, WRITE AN ANIMATED GIF\n\n mlab.view(azimuth= 360, distance=200) # starting at this camera angle\n\n duration = 4 # duration of the animation in seconds (it will loop)\n\n def make_frame(t):\n \"\"\" Generates and returns the frame for time t. 
\"\"\"\n mlab.view(azimuth= 100*t/duration, distance=100) # roll camera angle\n f = mlab.gcf()\n f.scene._lift()\n return mlab.screenshot(antialiased=True) # return a RGB image\n\n animation = mpy.VideoClip(make_frame, duration=duration)#.resize(0.5)\n # Video generation takes 10 seconds, GIF generation takes 25s\n animation.write_videofile(\"plots/movies/test.mp4\", fps=20)\n # animation.write_gif(\"wireframe.gif\", fps=20)", "def create_video_unique(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for i in range(len(all_obj_locs[::STEP])):\n plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n plt.ylim([-LANE_LENGTH / 4 + 25, LANE_LENGTH / 4 + 75])\n plt.xlim([-50, LANE_LENGTH + 50])\n x_s = [p[0][1] for p in all_obj_locs[::STEP][:i + 1]]\n y_s = [p[0][0] for p in all_obj_locs[::STEP][:i + 1]]\n s = 10\n plt.plot([0, 0], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([LANE_LENGTH + MARGIN, LANE_LENGTH + MARGIN], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [0 - MARGIN, 0 - MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [LANE_WIDTH + MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.scatter(x_s, y_s, s=s)\n x_s_pins = init_pins()[:, 0]\n y_s_pins = init_pins()[:, 1]\n plt.scatter(y_s_pins, x_s_pins, s=3, color=\"black\")\n plt.savefig(\"data/frame\" + str(i) + \".png\")\n plt.close()\n # plt.show()\n i += 1\n create_video_from_frames(len(all_obj_locs[::STEP]), fps / STEP / DT)", "def generate_heatmap_video(img_list,size,video_filename): \n out = cv2.VideoWriter(video_filename,cv2.VideoWriter_fourcc(*'DIVX'), 25, size)\n for i in range(len(img_list)):\n out.write(img_list[i])\n out.release()\n print('Heatmap video generated at: ', video_filename)", "def _visualize_numpy_video(vid):\r\n\r\n plt.axis('off')\r\n\r\n num_frames = vid.shape[0]\r\n img = plt.imshow(vid[0])\r\n\r\n for i in range(1, num_frames):\r\n img.set_data(vid[i])\r\n plt.pause(1.0 / 25.0)\r\n\r\n plt.show()", "def render(self, mode='human', action = None, num_col = 1, save_video = False):\n xmin = min(min(self.start_xpoints), min(self.goal_xpoints)) - 10.0\n xmax = max(max(self.start_xpoints), max(self.goal_xpoints)) + 10.0\n ymin = min(min(self.start_ypoints), min(self.goal_ypoints)) - 10.0\n ymax = max(max(self.start_ypoints), max(self.goal_ypoints)) + 10.0\n\n if self.fig is None:\n if not save_video:\n plt.ion()\n fig = plt.figure(figsize = (5*num_col, 5))\n def handle_close(evt):\n exit()\n\n fig.canvas.mpl_connect('close_event', handle_close)\n if not save_video:\n plt.show()\n\n ax = fig.add_subplot(1, num_col, 1)\n\n colors = self.task.robot_colors()# cm.rainbow(np.linspace(0, 1, len(self.x[:, 0])))\n scatter1 = ax.scatter(self.x[:, 0], self.x[:, 1], c=colors)\n scatter2 = ax.scatter(self.goal_xpoints, self.goal_ypoints, c='k', marker=\"x\")\n\n plt.title('%d Robots Formation'%len(self.x))\n #plt.gca().legend(('Robots'))\n\n self.task.plot()\n\n plt.ylim(ymin, ymax)\n plt.xlim(xmin, xmax)\n a = gca()\n a.set_xticklabels(a.get_xticks(), font)\n a.set_yticklabels(a.get_yticks(), font)\n self.fig = fig\n self.scatter1 = scatter1\n self.scatter2 = scatter2\n\n X = self.x[:, 0]\n Y = self.x[:, 1]\n\n self.scatter1.set_offsets(np.c_[X, Y])\n\n ax = self.fig.add_subplot(1, num_col, 1)\n for arrow in self.arrows:\n ax.patches.remove(arrow) \n\n self.arrows = []\n if action != None:\n _, max_per_agent = torch.max(action, dim = 1)\n #print(max_per_agent)\n print(set(max_per_agent.data.cpu().numpy()))\n \n for i in range(self.n_agents):\n x 
= self.x[i, 0]\n y = self.x[i, 1]\n goal = self.goals[ max_per_agent[i]]\n dx = goal[0] - x\n dy = goal[1] - y\n arrow = plt.Arrow(x, y, dx, dy )\n self.arrows.append(arrow)\n ax.add_patch(arrow)\n\n self.fig.canvas.draw()\n if not save_video:\n self.fig.canvas.flush_events()\n if action != None:\n plt.pause(0.01)\n else:\n plt.pause(0.01)\n\n return self.fig, self.scatter1", "def generate_movie(filename, x_size=640, y_size=360, numframes=150, dpi=100):\n global timeflag\n timeflag = 1\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n print \"red_function:\\t\" + str(red_function)\n print \"green_function:\\t\" + str(green_function)\n print \"blue_function:\\t\" + str(blue_function)\n\n for n in range(1, numframes+1):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n t = remap_interval(n, 0, numframes, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, t)),\n color_map(evaluate_random_function(green_function, x, y, t)),\n color_map(evaluate_random_function(blue_function, x, y, t))\n )\n im.save(\"movie_images/\"+'%03d'%n+\".png\")\n\n os.system(\"echo 'yes'|avconv -r 24 -i movie_images/%03d.png -vb 20M myart.mp4\")\n\n \"\"\"fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n im = Image.new(\"RGB\", (x_size, y_size))\n\n def update_img(n):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, n)),\n color_map(evaluate_random_function(green_function, x, y, n)),\n color_map(evaluate_random_function(blue_function, x, y, n))\n )\n im.save(\"test.png\")\n return im\n ani = animation.FuncAnimation(fig, update_img, numframes, interval=24) #TODO: FIX THIS\n writer = animation.writers['avconv'](fps=24)\n\n ani.save(filename, writer=writer, dpi=dpi)\"\"\"", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n 
rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def make_plots(self):\n n_rounds = self.run.n_rounds\n\n log.info('Making %d frames', n_rounds)\n args = [self._get_for_parallel(i) for i in range(n_rounds)]\n self.lbv.map(_plot_helper, args)", "def plot_pRF_DM(dm_array, filename):\n\n # if output path doesn't exist, create it\n\n outfolder = op.split(filename)[0]\n\n if not op.isdir(outfolder): \n os.makedirs(outfolder)\n print('saving files in %s'%filename)\n\n dm_array = (dm_array * 255).astype(np.uint8)\n\n for w in range(dm_array.shape[-1]):\n im = Image.fromarray(dm_array[...,w])\n im.save(op.join(outfolder,\"DM_TR-%s.png\"%str(w).zfill(4))) \n\n ## save as video\n img_name = op.join(outfolder,'DM_TR-%4d.png')\n os.system(\"ffmpeg -r 6 -start_number 0 -i %s 
-vcodec mpeg4 -y %s\"%(img_name, filename))", "def showVideo( oVideo, oPathXY=np.array([]) ):\n global oVideo_t, iFrame, oPathXY_t\n fig = plt.figure()\n # prikazi prvi okvir\n iFrame = 0\n oPathXY_t = oPathXY\n oVideo_t = oVideo\n print(oVideo.shape)\n im = plt.imshow(oVideo[...,iFrame], cmap=plt.get_cmap('Greys_r'))\n # definiraj funkcijo za osvezevanje prikaza\n def updatefig(*args):\n global oVideo_t, iFrame, oPathXY_t\n iFrame = ( iFrame + 1 ) % oVideo_t.shape[-1]\n im.set_array( oVideo_t[...,iFrame] ) \n if iFrame < oPathXY.shape[0]:\n plt.plot( oPathXY[iFrame,0], oPathXY[iFrame,1], 'xr' ,markersize=3 ) \n return im,\n # prikazi animacijo poti\n ani = animation.FuncAnimation(fig, updatefig, interval=25, blit=True)\n plt.show()", "def save(filename, box_width, fps=25):\n\n video_frames = len(x_frames)\n\n # Generate video output\n x_output = x_frames - box_width * np.floor(x_frames/box_width)\n y_output = y_frames - box_width * np.floor(y_frames/box_width)\n z_output = z_frames - box_width * np.floor(z_frames/box_width)\n\n fig = plt.figure()\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1)\n ax = fig.add_subplot(111, projection='3d', autoscale_on=False,\n xlim=(-box_width, box_width),\n ylim=(-box_width, box_width),\n zlim=(-box_width, box_width))\n\n # ax = fig.add_subplot(111, projection='3d', autoscale_on=False,\n # xlim=(-box_width, box_width),\n # ylim=(-box_width, box_width),\n # zlim=(-box_width, box_width))\n\n\n particles = ax.scatter(x_frames[0], y_frames[0], z_frames[0])\n\n\n def init():\n \"\"\" initialize animation\n \"\"\"\n fig.clear()\n ax = fig.add_subplot(111, projection='3d', autoscale_on=False,\n xlim=(-box_width, box_width),\n ylim=(-box_width, box_width),\n zlim=(-box_width, box_width))\n particles = ax.scatter([], [], [])\n return particles\n\n\n def animate(i):\n \"\"\" perform animation step\n \"\"\"\n fig.clear()\n\n x = x_output[i]\n y = y_output[i]\n z = z_output[i]\n r = ((x-9.0)**2 + (y+9.0)**2 + (z-9.0)**2)**0.5\n r = 500/r\n\n # ax = fig.add_subplot(111, projection='3d', autoscale_on=False,\n # azim=-45,elev=35,\n # xlim=(-box_width, box_width),\n # ylim=(-box_width, box_width),\n # zlim=(-box_width, box_width))\n\n ax = fig.add_subplot(111, projection='3d', autoscale_on=False,\n azim=-45,elev=35,\n xlim=(0.0, box_width),\n ylim=(0.0, box_width),\n zlim=(0.0, box_width))\n\n particles = ax.scatter(x_output[i], y_output[i], z_output[i], s=r, c='k', marker='o', edgecolors='none')\n\n return particles\n\n\n # Create the animation object\n ani = animation.FuncAnimation(fig, animate, frames=video_frames,\n interval=10, blit=True, init_func=init)\n\n # Save the animation to disk\n # Change fps for another framerate\n ani.save(filename+'.mp4', fps=fps)", "def create_video_hit(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for f in all_obj_locs[::STEP]:\n fig = plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n ax = fig.add_subplot(projection='3d')\n ax.set_axis_off()\n plt.ylim([-70, 170])\n plt.xlim([1800 - 40, LANE_LENGTH + 50 + 40])\n\n # plt.axis(\"off\")\n x_s = [p[1] for p in f[:-1, :] if p[2] < 40 and 0 < p[0] < LANE_WIDTH and p[1] < LANE_LENGTH + 50]\n y_s = [p[0] for p in f[:-1, :] if p[2] < 40 and 0 < p[0] < LANE_WIDTH and p[1] < LANE_LENGTH + 50]\n z_s = [p[2] for p in f[:-1, :] if p[2] < 40 and 0 < p[0] < LANE_WIDTH and p[1] < LANE_LENGTH + 50]\n s = 300\n ax.set_zlim(-5, 140)\n ax.scatter(x_s, y_s, z_s, s=s)\n ax.scatter([f[-1, 1]], [f[-1, 0]], [f[-1, 2]], s=s / 2, color=\"red\")\n plt.savefig(\"data/frame\" + str(i) + 
\".png\")\n plt.close()\n plt.show()\n i += 1\n create_video_from_frames_hit(len(all_obj_locs[::STEP]), fps / STEP / DT / 10)", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def plot_test_sclf_xp_movie(plot_module,\n calc_ids,\n particle_names,\n ylim,\n sample_dict=None,\n xlim=None,\n tt=None,\n fps=None,\n keep_frame_files=None,\n moving_grid_dict=None,\n use_cell_coordinates=False,\n show_cells=False,\n time_normalization=None,\n **kwargs):\n # make sure calc_id is a sequence\n if not isinstance( calc_ids, (list,tuple) ):\n calc_ids = (calc_ids,)\n # make sure particle_names is a sequence\n if not isinstance( particle_names, (list,tuple) ):\n particle_names = (particle_names,)\n # particles sequence\n xps=[]\n for pname in particle_names:\n xps.append( tdc_Data_Sequence.init_from_data( tdc_XP_Data,\n calc_ids=calc_ids,\n particle_name=pname,\n sample_dict=sample_dict,\n tt=tt,\n time_normalization=time_normalization,\n **kwargs) )\n # plotter\n pp = test_sclf_XPs_Plotter(calc_ids[0],xps=xps)\n if use_cell_coordinates:\n pp.use_cell_coordinates()\n if show_cells:\n pp.show_cells_on()\n # plot moving grid if asked\n if moving_grid_dict:\n pp = tdc_Moving_Grid_Plotter(pp,moving_grid_dict)\n # movie frames\n MF = plot_module.MovieFrames.Single_Panel_Movie_Frames(pp, ylim=ylim, xlim=xlim)\n # movie_id - directory with the movie file\n movie_id = 'XP' + '_' + calc_ids[0]\n # -----------------------------------------\n # make movie\n plot_module.MovieMakers.plot_movie( MF, movie_id, fps, keep_frame_files)", "def _plot_camera_view(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-0.003, 0.003, -0.003, 0.003])\n axs.grid()\n axs.plot([0], [0], 'r+')\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 250):\n axs.plot(\n self._feat_vec[t_step, 0, 0],\n self._feat_vec[t_step, 1, 0], 'ro')\n axs.plot(\n self._feat_vec[t_step, 0, 1],\n self._feat_vec[t_step, 1, 1], 'bo')\n axs.plot(\n self._feat_vec[t_step, 0, 
2],\n self._feat_vec[t_step, 1, 2], 'yo')\n axs.plot(\n self._feat_vec[t_step, 0, 3],\n self._feat_vec[t_step, 1, 3], 'go')\n axs.plot(\n self._feat_vec[t_step, 0, 4],\n self._feat_vec[t_step, 1, 4], 'ro')\n plt.pause(1 / self._plot_fps)", "def plot_video(video, fps=1):\n global vis\n opts = dict(fps=int(fps))\n win = vis.video(video, opts=opts)\n return win", "def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):\n E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), trace['E_z'].cpu()\n num_rows = len(data)\n num_cols = 2 + int((num_sweeps-1) / viz_interval)\n gs = gridspec.GridSpec(num_rows, num_cols)\n gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)\n fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))\n for row_ind in range(num_rows):\n ax = fig.add_subplot(gs[row_ind, 0])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset in the 1st column\n if row_ind == 0:\n ax.set_title('Data', fontsize=title_fontsize)\n# col_ind = 1\n for col_ind in range(num_cols-1):\n sweep = col_ind * viz_interval\n ax = fig.add_subplot(gs[row_ind, col_ind+1])\n viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))\n if row_ind == 0:\n if sweep == 0:\n ax.set_title('RWS', fontsize=title_fontsize)\n else:\n ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)\n if save_name is not None:\n plt.savefig(save_name + '.svg', dpi=300)", "def scatter_plot(self):\n sns.set_style('whitegrid')\n \n fig, ax = plt.subplots()\n cmap = sns.cubehelix_palette(8, start=.5, rot=-.75, as_cmap=True)\n \n \n plt.title('Benchmark and Trial Samples', fontsize=16)\n \n ax.xaxis.set_tick_params(labelsize=16, direction='inout', length=6, width=1, color='gray')\n ax.yaxis.set_tick_params(labelsize=16, direction='inout', length=6, width=1, color='gray')\n \n ax.scatter(self.x_benchmark[:,0], self.x_benchmark[:,1], c='magenta',\n alpha=0.5, marker='x',label='B sample')\n ax.scatter(self.x_trial[:,0],self.x_trial[:,1], c='blue',\n alpha=0.2, marker='s',label='T sample')\n \n plt.grid(True)\n plt.legend(loc='upper left', fontsize=14)\n # plt.show()\n plt.savefig(\"pyplot.png\")", "def pic_scatter():\n vu.pic_scatter(annual_report_indexes, 'annual_report')", "def make_time_series_animation(movie_info, \n animation_dir,\n time_series_data_path,\n mouse_type, \n format):\n animal_id, exp_date, exp_type = movie_info['name'].split('_')\n print \"making time series animation\"\n\n cmd = ['python', '../analysis/group_analysis.py']\n cmd += ['--input-path='+str(time_series_data_path)]\n cmd += ['--output-path='+str(animation_dir)]\n cmd += ['--time-series-animation']\n\n cmd += ['--mouse-type='+str(mouse_type)]\n cmd += ['--plot-format='+str(format)]\n cmd += ['--exp-type='+str(exp_type)]\n cmd += ['--animal-id='+str(animal_id)]\n cmd += ['--exp-date='+str(exp_date)]\n\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)# stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.wait()", "def plot_model(self):\n \n plt.figure(figsize=[10,5])\n \n plt.scatter(self.receivers['recxs'],self.receivers['reczs'],marker='v')\n if self.source['src_type']==4:\n from obspy.imaging.beachball import beach\n 
beach = beach(self.source['mt'], xy=(self.source['srcx'],self.source['srcz']), width=self.model_parameters['xmax']*0.05)\n ax = plt.gca()\n \n ax.add_collection(beach) \n ax.set_aspect(\"equal\")\n \n else:\n plt.scatter(self.source['srcx'],self.source['srcz'],marker='*',color='r',s=200)\n \n plt.axhline(y=0,c='0.5')\n plt.xlim(0,self.model_parameters['xmax'])\n plt.ylim(self.model_parameters['zmax'],-0.1*self.model_parameters['zmax'])\n \n plt.xlabel('Distance (km)')\n plt.ylabel('Depth (km)')\n plt.grid()\n plt.show()", "def _generate_scatter_plots(self):\n\n for xcolname, ycolname in itertools.product(self.xaxes, self.yaxes):\n _LOG.info(\"Generating scatter plot: %s vs %s.\", xcolname, ycolname)\n\n pinfo = self._add_pinfo(xcolname, ycolname, is_hist=False)\n markers = itertools.cycle(_SCATTERPLOT_MARKERS)\n gobjs = []\n\n for res in self.rsts:\n df = self._reduce_df_density(res, xcolname, ycolname)\n\n # How many datapoints were included into the scatter plot.\n pinfo[\"sp_datapoints_cnt\"] = len(df.index)\n\n text = self._create_hover_text(res, df, pinfo)\n marker = {\"size\" : 4, \"symbol\" : next(markers), \"opacity\" : self._opacity}\n try:\n gobj = plotly.graph_objs.Scattergl(x=self._base_unit(df, xcolname),\n y=self._base_unit(df, ycolname),\n opacity=self._opacity,\n text=text, mode=\"markers\",\n name=res.reportid, marker=marker)\n except Exception as err:\n raise Error(f\"failed to create scatter plot '{ycolname}-vs-{xcolname}':\\n{err}\")\n gobjs.append(gobj)\n\n self._create_diagram(gobjs, pinfo)", "def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)", "def generate_video(image_folder, video_name, video_frames_path):\n \n try:\n os.stat(video_frames_path)\n except:\n os.makedirs(video_frames_path)\n \n images = [img for img in os.listdir(image_folder)\n if img.endswith(\".jpg\") or\n img.endswith(\".jpeg\") or\n img.endswith(\"png\") or\n img.endswith(\"tif\")]\n\n images.sort()\n\n print(images)\n\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n\n height, width, layers = frame.shape\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(video_frames_path + '/' + video_name, fourcc, 1, (width, height))\n\n # Appending the images to the video one by one\n video_frame = np.zeros((height, width, 3), np.uint8)\n for image in images:\n img = cv2.imread(os.path.join(image_folder, image), cv2.IMREAD_UNCHANGED)\n video_frame = overlay_transparent(video_frame, img)\n cv2.imwrite(os.path.join(video_frames_path, image), video_frame)\n video.write(video_frame)\n\n # Deallocating memories taken for window creation\n cv2.destroyAllWindows()\n video.release() # releasing the video generated" ]
[ "0.6680317", "0.64790976", "0.6428148", "0.6390699", "0.63647693", "0.6335337", "0.6217466", "0.6141426", "0.61217576", "0.605406", "0.60315275", "0.60152674", "0.59969765", "0.5970817", "0.59612995", "0.5947531", "0.5892995", "0.5829542", "0.5770267", "0.57338464", "0.5695602", "0.5639808", "0.56209624", "0.5614852", "0.56141824", "0.5608853", "0.56034076", "0.559887", "0.5598514", "0.5571494" ]
0.660165
1
Compute the linear decay rate of quantity x at time t. x(t) = x0 - (1 - alpha) * x0 * t / T if t <= T
def linear_decay(x0, alpha, T, t):
    if t <= T:
        return x0 - (1 - alpha) * x0 * t / T
    else:
        return alpha * x0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rate(self, t):\n return self.l_0 + \\\n self.alpha * sum(np.exp([self.beta * -1.0 * (t - s)\n for s in self.prev_excitations\n if s <= t]))", "def decay(time_, max_time, coeff):\n threshold = max_time - time_\n if threshold < 0:\n threshold = 0\n return 1 + threshold * coeff / max_time", "def asymptotic_decay(learning_rate, t, max_iter):\n return learning_rate / (1+t/(max_iter/2))", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n \"\"\"if staircase == True\n decayed_learning_rate = learning_rate /\n (1 + decay_rate * floor(global_step / decay_step)))\"\"\"\n return tf.train.inverse_time_decay(\n alpha, global_step, decay_step, decay_rate, staircase=True)", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n\n lr = tf.train.inverse_time_decay(alpha, global_step, decay_step,\n decay_rate, staircase=True)\n\n return lr", "def decay_learning_rate(initial_learning_rate, i, n_iterations):\n return initial_learning_rate * np.exp(-i / n_iterations)", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n gd = tf.train.inverse_time_decay(alpha, global_step, decay_step,\n decay_rate, staircase=True)\n return gd", "def lr_decay(step):\n return(alpha / (1 + decay_rate * step))", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n return tf.train.inverse_time_decay(\n alpha, global_step, decay_step, decay_rate, staircase=True\n )", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n\n return tf.train.inverse_time_decay(alpha, global_step, decay_step,\n decay_rate, staircase=True)", "def learning_rate_decay(a, d_rate, g_step, d_step):\n return tf.train.inverse_time_decay(a, g_step, d_step, d_rate, True)", "def learning_rate(self, t):\n # return self.init_learning_rate * (1 - t)\n return self.init_learning_rate / (1 + t)\n # return self.init_learning_rate * exp(-t)\n # return self.init_learning_rate * (.005 / self.init_learning_rate) ** t", "def decay(t1, t2, lamb):\n\n return (math.exp(-lamb*t1) - math.exp(-lamb*t2))/(lamb*(t2-t1))", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n epoc_number = int(global_step / decay_step)\n alpha /= (1 + decay_rate * epoc_number)\n return alpha", "def model_growth_rate(t, a_0, omega):\n a = a_0 * np.exp(omega * t)\n return a", "def learning_rate_decay(alpha, decay_rate, global_step, decay_step):\n return tf.train.inverse_time_decay(\n learning_rate=alpha,\n global_step=global_step,\n decay_steps=decay_step,\n decay_rate=decay_rate,\n staircase=True)", "def linear_decay(epoch: int, total_num_updates: int) -> float:\n return 1 - (epoch / float(total_num_updates))", "def lr(self):\n if self.T is None:\n return self.lr0\n else:\n return linear_decay(self.lr0, self.alpha, self.T, self.step)", "def _decay_rate_pow(i: int, exponent: float = 0.8) -> float:\n t = jnp.array(i, jnp.float32) + 1.0\n return 1.0 - t**(-exponent)", "def next(self, dt):\n self.x = self.x + \\\n (self.rate-0.5*self.vola*self.vola)*dt + \\\n sqrt(dt)*self.vola*np.random.normal()\n return exp(self.x)", "def update_pulse_rate(t,r_0 = 0.2,gamma=0.05):\n return r_0 * (1 - exp(-gamma*t))", "def expDecay(L, N0, tf):\n\n\treturn N0*math.exp(-L*tf)", "def exponentialLearningRate(base):\n def function(t):\n return base ** (t-1)\n return function", "def rate(self, t, yt):\n # TODO add with parameters\n T = yt[-1]\n y = yt[:-1]\n # self.__log.debug('Em %s', Em)\n dIdt = (self.parameters.A0 * np.exp(-self._Em / Rgas / T))\n # self.__log.debug('dkdt %s', dkdt)\n coeff1 = self.Wm * 
self.mt / sqrtpi\n coeff2 = np.exp(-pow(\n (self._Em - self.parameters.E0) / self.parameters.sigma, 2) / 2)\n coeff3 = np.exp(-y[1:]) * dIdt\n # self.__log.debug('coeff: %s %s %s', coeff1, coeff2, coeff3)\n # dydt = (self.parameters['y0'] - y[0]) * \\\n # np.sum(coeff1 + coeff2 + coeff3)\n dydt = self.parameters.y0 * np.sum(coeff1 * coeff2 * coeff3)\n # self.__log.debug('dydt %s', dydt)\n return np.append(dydt, dIdt)", "def forward_price(S, t, r):\n return S / np.exp(-r * t)", "def input_f(t,decay=0.5,freq=1.5):\n u_t = 1*(t>0)\n return np.cos(freq*t)*np.exp(-decay*t) * u_t", "def decay(self):\n if self.l_rate > self.l_rate_bound[0] and self.l_rate - self.decay_rate > 0.0:\n self.l_rate -= self.decay_rate\n elif self.l_rate - self.decay_rate <= 0.0 or self.l_rate < self.l_rate_bound[0]:\n self.l_rate = self.l_rate_bound[0]\n\n for layer in self.network:\n layer.learning_rate = self.l_rate", "def model_1exp_fft(ns, a, t, s, t0=0):\n a, t, s = physicond(a, t, s)\n\n # auxilary function taking as argument a time array.\n def aux(nu_array):\n rising = a*s / (1j * 2*np.pi * s*nu_array + 1)\n decaying = a*t / (1j * 2*np.pi * t*nu_array + 1)\n offset_phase = np.exp(-1j * 2*np.pi * t0*nu_array)\n return (decaying - rising) * offset_phase * ns\n\n return aux", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func", "def linear_schedule(initial_value: float) -> Callable[[float], float]:\n def func(progress_remaining: float) -> float:\n \"\"\"\n Progress will decrease from 1 (beginning) to 0.\n\n :param progress_remaining:\n :return: current learning rate\n \"\"\"\n return progress_remaining * initial_value\n\n return func" ]
[ "0.6874832", "0.68739563", "0.6858953", "0.68510693", "0.68177027", "0.6815531", "0.68047565", "0.67692065", "0.67592347", "0.6751221", "0.67395395", "0.6719127", "0.67059916", "0.6692392", "0.6672063", "0.66283715", "0.6593637", "0.656726", "0.64751", "0.641536", "0.6249071", "0.624531", "0.6219867", "0.62196213", "0.6195561", "0.61905545", "0.6156425", "0.6105536", "0.6091331", "0.6091331" ]
0.82191306
0
Returns a feed_dict with the learning rate filled in.
def feed_dict(self): return {self.lr_tensor: self.lr()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_learning_rate(self):\n\n if (self.FLAGS.learning_rate_decay is \"exponential\"):\n self.learning_rate = tf.train.exponential_decay(\n self.FLAGS.learning_rate,\n self.global_step,\n self.FLAGS.decay_steps,\n self.FLAGS.decay_rate)\n else :\n self.learning_rate = self.FLAGS.learning_rate", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def update_learning_rate(self) -> None:\n self.epsilon = self.initial_epsilon / (1. + self.rate_decay * self.n_it)\n return", "def learning_rate(epoch):\n self.lr = self.lr / 1.00000001\n return self.lr", "def assign_learning_rate(session, lr_update, lr_placeholder, new_lr):\n session.run(lr_update, feed_dict={lr_placeholder: new_lr})", "def learning_rate_fn():\n start_learning_rate = FLAGS.start_learning_rate\n step = tf.cast(tf.compat.v1.train.get_or_create_global_step(), 'float32')\n effective_step = tf.maximum(step - FLAGS.lr_decay_after_num_steps + 1, 0)\n lr_step_ratio = tf.cast(effective_step, 'float32') / float(\n FLAGS.lr_decay_steps)\n warm_up_factor = tf.cast(tf.minimum(step / float(FLAGS.warm_up_steps), 1.),\n 'float32')\n final_learning_rate = FLAGS.gpu_learning_rate\n # Ease in to final learning rate.\n lr = ((1. - warm_up_factor) * start_learning_rate) + (\n warm_up_factor * final_learning_rate)\n lr = tf.cast(lr, 'float32')\n if FLAGS.lr_decay_type == 'none' or FLAGS.lr_decay_steps <= 0:\n return lr\n elif FLAGS.lr_decay_type == 'exponential':\n return lr * 0.5**lr_step_ratio\n else:\n raise ValueError('Unknown lr_decay_type', FLAGS.lr_decay_type)", "def GetLearnRate(DILr,Epochs):\n\n if(DILr.mode == 'poly'):\n ScheduelLr = PolynomialDecay(maxEpochs=DILr.StepSize,initAlpha=DILr.Lr,power=DILr.factor)\n ScheduelLr.plot(range(1,int(Epochs)+1))\n lrate = LearningRateScheduler(ScheduelLr)\n elif(DILr.mode == 'cycle'):\n lrate = CyclicLR(step_size=DILr.StepSize,mode=DILr.cycle,gamma=DILr.factor,base_lr=DILr.MinLr,max_lr=DILr.Lr)\n elif(DILr.mode == 'drop'):\n ScheduelLr = StepDecay(initAlpha=DILr.Lr, factor=DILr.factor, dropEvery=DILr.StepSize)\n ScheduelLr.plot(range(1,int(Epochs)+1))\n lrate = LearningRateScheduler(ScheduelLr)\n elif(DILr.mode == 'normal'):\n lrate = None\n\n return np.asarray(lrate)", "def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)", "def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)", "def __init__(self, lr, eps=1e-6):\n LearningRate.__init__(self, lr)\n\n self.epsilon = eps\n self.parameters = []", "def get_learning_rate(learning_rate, hidden_size, learning_rate_warmup_steps):\n with tf.name_scope(\"learning_rate\"):\n warmup_steps = tf.to_float(learning_rate_warmup_steps)\n step = tf.to_float(tf.train.get_or_create_global_step())\n\n learning_rate *= (hidden_size ** -0.5)\n # Apply linear warmup\n learning_rate *= tf.minimum(1.0, step / warmup_steps)\n # Apply rsqrt decay\n learning_rate *= tf.rsqrt(tf.maximum(step, warmup_steps)) \n return learning_rate", "def build_learning_rate(self):\n\n # TODO(arashwan): Explore if we want to only allow explicit const lr sched.\n if not self._lr_config:\n lr = self._optimizer_config.learning_rate\n else:\n lr = LR_CLS[self._lr_type](**self._lr_config.as_dict())\n\n if self._warmup_config:\n lr = WARMUP_CLS[self._warmup_type](lr, **self._warmup_config.as_dict())\n\n return lr", "def get_learning_rate(self):\n config = self.config\n cur_lr = Train_model_pipeline.adjust_learning_rate(\n self.optimizer,\n self.epoch,\n 
config[\"training\"][\"learning_rate\"],\n decay=config[\"training\"][\"lr_decay_rate\"],\n step=config[\"training\"][\"lr_decay_step\"],\n )\n self.cur_lr = cur_lr\n return cur_lr", "def build_learning_rate(initial_learning_rate,\n global_step,\n decay_steps=250000,\n add_summary=True,\n warmup_steps=2500):\n learning_rate = tf.train.cosine_decay(initial_learning_rate, global_step,\n decay_steps)\n tf.logging.info('Warmup_steps: {}'.format(warmup_steps))\n warmup_learning_rate = (\n initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(\n warmup_steps, tf.float32))\n\n learning_rate = tf.cond(\n global_step < warmup_steps,\n lambda: tf.minimum(warmup_learning_rate, learning_rate),\n lambda: learning_rate)\n\n if add_summary:\n tf.summary.scalar('learning_rate', learning_rate)\n\n return learning_rate", "def fix_initial_nan_learning_rate(dict_list):\n if len(dict_list) > 1:\n dict_list[0]['LearningRate'] = dict_list[1]['LearningRate']", "def _update_initial_learning_rate(configs, learning_rate):\n\n optimizer_type = get_optimizer_type(configs[\"train_config\"])\n if optimizer_type == \"rms_prop_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.rms_prop_optimizer\n elif optimizer_type == \"momentum_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.momentum_optimizer\n elif optimizer_type == \"adam_optimizer\":\n optimizer_config = configs[\"train_config\"].optimizer.adam_optimizer\n else:\n raise TypeError(\"Optimizer %s is not supported.\" % optimizer_type)\n\n learning_rate_type = get_learning_rate_type(optimizer_config)\n if learning_rate_type == \"constant_learning_rate\":\n constant_lr = optimizer_config.learning_rate.constant_learning_rate\n constant_lr.learning_rate = learning_rate\n elif learning_rate_type == \"exponential_decay_learning_rate\":\n exponential_lr = (\n optimizer_config.learning_rate.exponential_decay_learning_rate)\n exponential_lr.initial_learning_rate = learning_rate\n elif learning_rate_type == \"manual_step_learning_rate\":\n manual_lr = optimizer_config.learning_rate.manual_step_learning_rate\n original_learning_rate = manual_lr.initial_learning_rate\n learning_rate_scaling = float(learning_rate) / original_learning_rate\n manual_lr.initial_learning_rate = learning_rate\n for schedule in manual_lr.schedule:\n schedule.learning_rate *= learning_rate_scaling\n elif learning_rate_type == \"cosine_decay_learning_rate\":\n cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate\n learning_rate_base = cosine_lr.learning_rate_base\n warmup_learning_rate = cosine_lr.warmup_learning_rate\n warmup_scale_factor = warmup_learning_rate / learning_rate_base\n cosine_lr.learning_rate_base = learning_rate\n cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate\n else:\n raise TypeError(\"Learning rate %s is not supported.\" % learning_rate_type)", "def __update(self, learning_rate):\n for layer in self.layers:\n layer.weights.set_value((layer.weights - learning_rate * layer.dW).eval())\n layer.biases.set_value((layer.biases - learning_rate * layer.db).eval())", "def update_learning_rate(self):\n self.scheduler.step()\n lr = self.optimizer.param_groups[0]['lr']\n print('learning rate = %.7f' % lr)", "def test_learning_rate(lrmin=0.1,lrmax=1,lrstep=0.2):\n for lrate in np.arange(lrmin,lrmax,lrstep):\n print(\"learning rate: %s\" % lrate)\n get_nn_parameters(epochs=1, learning_rate=lrate)", "def get_learning_rate(training_params):\n\n initial_lr = training_params.initial_lr\n lr_decay_factor = 
training_params.lr_decay_factor\n lr_decay_steps_str = training_params.lr_decay_steps_str\n if lr_decay_steps_str:\n global_step = tf.train.get_or_create_global_step()\n lr_decay_steps = [int(s) for s in lr_decay_steps_str.split(\",\")]\n\n lr = tf.train.piecewise_constant(\n global_step,\n lr_decay_steps,\n [initial_lr * (lr_decay_factor ** i)\n for i in range(len(lr_decay_steps) + 1)]\n )\n else:\n lr = initial_lr\n return lr", "def pull_weights(self, learning_rate):\n for w in self.weights:\n w.value += learning_rate * w.gradient\n # Reset all the weights' gradient to 0\n # We will not reset all other units' gradient, because all other units should be initialized in next training\n # round, and the init value of gradient is 0\n for w in self.weights:\n w.gradient = 0", "def get_learning_rate():\n return 0.00001", "def __call__(self, epoch):\n # Compute the new dynamic learning rate, log in onto TensorBoard and\n # return the result for the training process\n learning_rate = self.schedule(epoch)\n tf.summary.scalar('learning rate', data=learning_rate, step=epoch)\n return learning_rate", "def __init__(self, epochs, learning_rate):\n\n self.epochs = epochs\n self.learning_rate = learning_rate", "def _update_learning_rate(self):\r\n\r\n self.n_steps += 1\r\n lr = self.factor * self._get_lr_scale()\r\n for param_group in self._optimizer.param_groups:\r\n param_group['lr'] = lr", "def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:\n return {}", "def configure_learning_rate(num_samples_per_epoch, global_step):\n start_decay_step, decay_steps, decay_factor = _get_decay_info()\n tf.logging.info(\" decay_scheme=%s, start_decay_step=%d, decay_steps %d, \"\n \"decay_factor %g\" % (FLAGS.decay_scheme,\n start_decay_step,\n decay_steps,\n decay_factor))\n learning_rate = _get_learning_rate_warmup(global_step)\n\n if FLAGS.learning_rate_decay_type == 'exponential':\n return tf.cond(\n global_step < start_decay_step,\n lambda: learning_rate,\n lambda: tf.train.exponential_decay(\n learning_rate,\n (global_step - start_decay_step),\n decay_steps, decay_factor, staircase=True),\n name=\"exponential_learning_rate_decay_cond\")\n elif FLAGS.learning_rate_decay_type == 'fixed':\n return tf.constant(FLAGS.learning_rate, name='fixed_learning_rate')\n elif FLAGS.learning_rate_decay_type == 'polynomial':\n return tf.cond(\n global_step < start_decay_step,\n lambda: learning_rate,\n lambda: tf.train.polynomial_decay(\n learning_rate,\n (global_step - start_decay_step),\n decay_steps,\n FLAGS.end_learning_rate,\n power=1.0,\n cycle=False),\n name=\"polynomial_learning_rate_decay_cond\")\n\n else:\n raise ValueError('learning_rate_decay_type [%s] was not recognized' %\n FLAGS.learning_rate_decay_type)", "def update_learning_rate(self) -> None:\n optimizer = list(self.optimizers.values())[0]\n old_lr = optimizer.param_groups[0]['lr']\n for name, scheduler in self.schedulers.items():\n if name == 'generator' and self.opt.generator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n elif name == 'discriminator' and self.opt.discriminator_scheduler_name == 'plateau':\n scheduler.step(self.metric)\n else:\n scheduler.step()\n\n lr = optimizer.param_groups[0]['lr']\n print('learning rate %.7f -> %.7f' % (old_lr, lr))\n return", "def learning_rate_decaying(optimizer, rate):\n lr = get_learning_rate(optimizer) * rate\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr", "def update(self):\n for filter in self.filters:\n filter.update(self.learning_rate)" ]
[ "0.65159", "0.615306", "0.615306", "0.6120223", "0.60584396", "0.6019127", "0.59622", "0.59313875", "0.59313875", "0.59218276", "0.5902011", "0.5895842", "0.5883783", "0.5803965", "0.57955194", "0.57940125", "0.5793272", "0.5792256", "0.5776716", "0.57706064", "0.5765436", "0.57617086", "0.5723206", "0.5722333", "0.57208383", "0.5707399", "0.5695572", "0.5690615", "0.56533384", "0.56454945" ]
0.6703134
0
This function uses patterns to test whether or not a temporal annotation is of a known absolute format
def is_absolute_timexe(string):
    patterns = [
        '(\d+)',  # just digits
        '(\d+/\d+/\d+)',
        '(\d+/\d+)',
        '(\d+-\d+-\d+)',
        '(\d+-\d+)',
        '^\d{1,2}\/\d{1,2}\/\d{4}$',  # matches dates of the form XX/XX/YYYY where XX can be 1 or 2 digits long and YYYY is always 4 digits long.
        "^((([0]?[1-9]|1[0-2])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\.)[0-5][0-9]((:|\.)[0-5][0-9])?))$",  # Matches times seperated by either : or . will match a 24 hour time, or a 12 hour time with AM or PM specified. Allows 0-59 minutes, and 0-59 seconds. Seconds are not required.
        "^ ((0[1 - 9]) | (1[0 - 2]))\ / (\d{2})$"
    ]

    for pattern in patterns:
        if re.match(pattern, string) is not None:
            return True

    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_format_of_annotation_in_file(self):\n if not self.is_span_valid():\n sys.exit()", "def _is_format_endpoint(pattern):\n return '?P<format>' in pattern._regex", "def __check_pattern(node):\n if node.tag != \"discover_datasets\":\n return False\n if \"from_tool_provided_metadata\" in node.attrib and string_as_bool(\n node.attrib.get(\"from_tool_provided_metadata\", \"false\")\n ):\n return True\n if \"pattern\" not in node.attrib:\n return False\n pattern = node.attrib[\"pattern\"]\n regex_pattern = NAMED_PATTERNS.get(pattern, pattern)\n # TODO error on wrong pattern or non-regexp\n if \"(?P<ext>\" in regex_pattern:\n return True", "def is_temporal(axis):\n return (axis.lower() in temporal_axes)", "def test_convert_azfp_01a_notemperature_notilt(azfp_path):\n azfp_01a_path = azfp_path / 'rutgers_glider_notemperature/22052500.01A'\n azfp_xml_path = azfp_path / 'rutgers_glider_notemperature/22052501.XML'\n\n echodata = open_raw(\n raw_file=azfp_01a_path, sonar_model='AZFP', xml_path=azfp_xml_path\n )\n\n # Temperature variable is present in the Environment group and its values are all nan\n assert \"temperature\" in echodata[\"Environment\"]\n assert echodata[\"Environment\"][\"temperature\"].isnull().all()\n\n # Tilt variables are present in the Platform group and their values are all nan\n assert \"tilt_x\" in echodata[\"Platform\"]\n assert \"tilt_y\" in echodata[\"Platform\"]\n assert echodata[\"Platform\"][\"tilt_x\"].isnull().all()\n assert echodata[\"Platform\"][\"tilt_y\"].isnull().all()", "def _verify_format(s, format):\n r = re.compile(format)\n if r.match(s) is not None:\n return True\n return False", "def test_decode_external_timestamp(self):\n self.assertEqual(td.external_timestamp(), decoder.decode_external_timestamp(\n BytesIO(td.external_timestamp(True)), self.mask))", "def _validate_time(mapping: Mapping[str, Any],\n ref: str) -> Optional[SchemaError]:\n if 'format' in mapping:\n token_lines = None # type: Optional[List[List[lexery.Token]]]\n try:\n token_lines = mapry.strftime.tokenize(format=mapping['format'])\n except (lexery.Error, NotImplementedError) as err:\n return SchemaError(str(err), ref='{}/format'.format(ref))\n\n valerr = mapry.strftime.validate_time_tokens(token_lines=token_lines)\n if valerr is not None:\n return SchemaError(str(valerr), ref='{}/format'.format(ref))\n\n return None", "def test_parse_time_with_invalid_absolute_datetime(self):\n self.assert_TPVE(parse_time, \"\", None)\n self.assert_TPVE(parse_time, \"blahblah\", None)\n # This is detected as a YYYYMMDD string, but it's invalid.\n self.assert_TPVE(parse_time, \"20150231\", None)\n\n # Graphite accepts the following, we don't.\n self.assert_TPVE(parse_time, \"2015_02_01\", None)\n self.assert_TPVE(parse_time, \"12:35 20150201\", None)\n self.assert_TPVE(parse_time, \"12:3520150201\", None)\n self.assert_TPVE(parse_time, \"12/31/99\", None)\n self.assert_TPVE(parse_time, \"6pm today\", None)\n self.assert_TPVE(parse_time, \"noon tomorrow\", None)\n self.assert_TPVE(parse_time, \"january 1\", None)\n self.assert_TPVE(parse_time, \"monday\", None)", "def _validate_datetime(mapping: Mapping[str, Any],\n ref: str) -> Optional[SchemaError]:\n if 'format' in mapping:\n try:\n _ = mapry.strftime.tokenize(format=mapping['format'])\n except (lexery.Error, NotImplementedError) as err:\n return SchemaError(str(err), ref='{}/format'.format(ref))\n\n return None", "def timestamp_line(content):\n return re.match(r\"((\\d\\d:){2}\\d\\d),(\\d{3}) --> ((\\d\\d:){2}\\d\\d),(\\d{3})\", content) is not 
None", "def _is_probably_old_datfile_format(raw_data):\n return not _is_probably_new_datfile_format(raw_data) and \"UTC\" in raw_data", "def extract_annotation_temporal(self, text, annotationStartPos, annotationEndPos, annotationType, \n expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n sentences = util.sentence_tokenize(text)\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n locsSentStarts.append(len(text))\n self.sentence_startPos = locsSentStarts\n \n AnnSent = None\n for sentnum, pos in enumerate(self.sentence_startPos):\n if annotationStartPos>=pos and annotationStartPos<=self.sentence_startPos[sentnum+1]-1:\n AnnSent = sentnum\n break\n \n featText = text[annotationStartPos:annotationEndPos]\n tags = self.regexp_tagger.tag(nltk.word_tokenize(featText))\n feat = Feature((annotationType, featText, AnnSent, tags, annotationStartPos, annotationEndPos))\n \n featurelist = [feat]\n\n taggedSentences = [] \n for sentnumber, sentence in enumerate(sentences):\n\n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n expDateInput = self.parse_time_string(expDateStr)\n onsetDateInput = self.parse_time_string(onsetDateStr) \n receiveDate = self.parse_time_string(refExpDateStr) \n \n self.exposureDate = expDateInput\n self.onsetDate = onsetDateInput\n self.receiveDate = receiveDate\n self.exposureDateConfidence = 0\n self.onsetDateConfidence = 0\n \n ##: Obtain timex list\n timexList = timexan.annotateTimexes(self.text, expDateInput) \n \n self.sentence_full_tags = self.create_sentence_full_tags(featurelist, timexList)\n \n timexList = self.preprocess_timex_list(timexList, featurelist)\n \n ###: divide features that contain multiple timexes\n featurelist = self.divide_feature_containing_multiple_timexes(featurelist, timexList)\n \n featurelist = self.create_feature_timex_association(featurelist, timexList)\n \n timexList = self.construct_timeline(timexList, featurelist)\n \n featurelist = self.process_feature_durations(featurelist)\n \n featurelist = self.postprocess_features(featurelist)\n \n feature = featurelist[0]\n tlink = feature.getTlink()\n if not tlink:\n return ('', '')\n \n timexes = [t for t in tlink.getTimexes() if t.getDateTime()]\n if not timexes:\n return ('', '')\n \n if len(timexes)==1:\n tStart = timexes[0].getDateTime()\n tEnd = tStart\n else:\n tStart = timexes[0].getDateTime()\n tEnd = timexes[1].getDateTime()\n \n strTimeStart = tStart.isoformat().split('T')[0]\n strTimeEnd = tEnd.isoformat().split('T')[0]\n \n return (strTimeStart, strTimeEnd)", "def test_st_annotation00101m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m1.xsd\",\n instance=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m1_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_time_requirement(self):\n test_string = \"AlanTimeZT\"\n test_passes = False\n try:\n 
self.parser.extract_zt(test_string)\n test_passes = False\n except:\n test_passes = True\n self.assertTrue(test_passes)", "def valid_format(self):\n\n # If candidate is None, return true\n if not self.dt:\n print \"dt empty\"\n return True\n\n # Verify if time format is ok and stores in into a time-tuple format\n try:\n stime = datetime.strptime(self.dt, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n return False\n else:\n return True", "def validaAnno(anno):\n valid = ((len(anno) == 4) and (int(anno)) > 1970)\n if (valid and((len(anno) == 4) and (int(anno)) > 1970)):\n valid=True\n if(valid == False):\n print(\"anno invalido.\")\n labelinformativo('anno invalido')\n return valid", "def test_annotation00101m7_positive_537(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Wildcard/annotation/annotation00101m/annotation00101m7.xsd\",\n instance=\"sunData/Wildcard/annotation/annotation00101m/annotation00101m7_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_annotation00101m2_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m2.xsd\",\n instance=\"sunData/SType/ST_annotation/ST_annotation00101m/ST_annotation00101m2_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_annotation00101m1_positive_540(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Wildcard/annotation/annotation00101m/annotation00101m1.xsd\",\n instance=\"sunData/Wildcard/annotation/annotation00101m/annotation00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def GuessDateConcept(data_format):\n stripped_format = data_format.strip()\n\n if re.search('^y+$', stripped_format):\n return 'time:year'\n elif re.search('^([yM]|[^a-zA-Z0-9])+$', stripped_format):\n return 'time:month'\n elif re.search('^([yMd]|[^a-zA-Z0-9])+$', stripped_format):\n return 'time:day'\n else:\n raise DataSourceError(\n 'Can\\'t figure out time concept for format: %s' % data_format)", "def tzdata_filter(line: str) -> bool:\n if line and line[0] == 'Z' :\n return True\n return False", "def test_ambiguous_m(self):\n with self.assertRaises(ValueError):\n util.parse_relative_time_string(\"+3m\")", "def should_format(\n filename: Path, include_patterns: Iterable[str], exclude_patterns: Iterable[str]\n) -> Tuple[bool, str]:\n from fnmatch import fnmatch\n\n if any(fnmatch(os.path.abspath(filename), pattern) for pattern in exclude_patterns):\n return False, \"Excluded file\"\n\n filename_no_ext, ext = os.path.splitext(filename)\n # ignore .py file that has a jupytext configured notebook with the same base name\n ipynb_filename = filename_no_ext + \".ipynb\"\n if ext == \".py\" and os.path.isfile(ipynb_filename):\n with open(ipynb_filename, \"rb\") as f:\n if b\"jupytext\" not in f.read():\n return True, \"\"\n with open(filename, \"rb\") as f:\n if b\"jupytext:\" not in f.read():\n return True, \"\"\n return False, \"Jupytext generated file\"\n\n if any(fnmatch(os.path.basename(filename), pattern) for pattern in include_patterns):\n return True, \"\"\n\n return False, \"Unknown file type\"", "def has_annotations(filepath):\n return filepath.endswith('.ll') and '[#uses=' in 
open(filepath).read()", "def verify(timestamp):\n if not isinstance(timestamp, str):\n raise TypeError('\"{}\" is not str type'.format(type(timestamp)))\n elif match('^[0-9]{1,2}(:[0-9]{1,2}){1,2}(\\.[0-9]{1,9})?$', timestamp):\n return True\n return False", "def test_parse_time_absolute_date(self):\n self.assertEqual(\n parse_time(\"20150201\", None), datetime(2015, 2, 1, 0, 0, 0))\n self.assertEqual(\n parse_time(\"19700101\", None), datetime(1970, 1, 1, 0, 0, 0))\n self.assertEqual(\n parse_time(\"19010101\", None), datetime(1901, 1, 1, 0, 0, 0))\n self.assertEqual(\n parse_time(\"99991231\", None), datetime(9999, 12, 31, 0, 0, 0))", "def test_annotation00101m1_positive_378(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/Notation/annotation/annotation00101m/annotation00101m1.xsd\",\n instance=\"sunData/Notation/annotation/annotation00101m/annotation00101m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_add_patterns_warns_if_spaczz_type_unrecognized(ruler: SpaczzRuler) -> None:\n with pytest.warns(PatternTypeWarning):\n ruler.add_patterns([{\"label\": \"GPE\", \"pattern\": \"Montana\", \"type\": \"invalid\"}])", "def test_short_format_contains_year(self):\n locale = {\n 'timeformat': '%H:%M',\n 'dateformat': '%Y-%m-%d',\n 'longdateformat': '%Y-%m-%d',\n 'datetimeformat': '%Y-%m-%d %H:%M',\n 'longdatetimeformat': '%Y-%m-%d %H:%M',\n }\n assert (dt.datetime(2017, 1, 1), dt.datetime(2017, 1, 2), True) == \\\n guessrangefstr('2017-1-1 2017-1-1', locale=locale)" ]
[ "0.592053", "0.5545554", "0.5531866", "0.5466324", "0.535955", "0.5319224", "0.52924705", "0.529213", "0.5254828", "0.5219572", "0.5217721", "0.520693", "0.5193551", "0.51913005", "0.51366746", "0.5125415", "0.51020896", "0.5094837", "0.5088116", "0.50763893", "0.50740904", "0.5073154", "0.5070205", "0.50528836", "0.5052372", "0.5043025", "0.5040494", "0.5021802", "0.50085765", "0.49824786" ]
0.58306456
1
this function filters absolute timexes using the patterns above
def filter_absolute_timexes():
    timexes = pd.read_excel('../TimeDatasets/i2b2 Data/i2b2_timexe_annotations.xlsx')
    timexes = timexes[timexes['type'].isin(['DATE', 'TIME'])]
    print('DATE AND TIME')
    print(timexes)

    absolute_timexes = timexes[[is_absolute_timexe(string) for string in timexes['ann_text']]]
    print('ABSOLUTE TIMEXES')
    print(absolute_timexes)
    absolute_timexes.to_excel('absolute_timexes.xlsx')

    relative_timexes = timexes[[(not is_absolute_timexe(string)) for string in timexes['ann_text']]]
    relative_timexes.to_excel('filtered_timexes.xlsx')

    # add the absolute characteristic as a boolean attribute of the timexe dataframe
    timexes['absolute'] = [is_absolute_timexe(string) for string in timexes['ann_text']]
    timexes.to_excel('date_and_time.xlsx')

    print('RELATIVE TIMEXES')
    print(relative_timexes)

    # Print the results
    print(len(timexes[(timexes.absolute == False) & (timexes.test == False)]['docname'].unique()))
    train_relatives = timexes[(timexes.absolute == False) & (timexes.test == False)]
    test_relatives = timexes[(timexes.absolute == False) & (timexes.test == True)]
    print('Train set : ' + str(len(train_relatives)) + " relative time expressions")
    print('Test set : ' + str(len(test_relatives)) + " relative time expressions")

    return relative_timexes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_time_slices(time_slices, apt_no, exp_no):\n # Removing the extraneous time slices\n if apt_no == '102A' and exp_no == '3':\n discard_ts = time_slices[\n (time_slices.phase == 'Not Found') & (time_slices.magnitude < 100)]\n time_slices = time_slices.ix[time_slices.index - discard_ts.index]\n\n elif apt_no == '603':\n print \"here\"\n # Likely power consumption of fridge is 110-150\n time_slices = time_slices[(time_slices.magnitude < 110) | (time_slices.magnitude > 150) &\n (time_slices.type == 'power')]\n # 25-26Nov\n if exp_no == '25-26Nov':\n time_slices = time_slices[time_slices.end_time < 1385404505]\n elif exp_no == '26-27Nov':\n time_slices = time_slices[time_slices.end_time < 1385492334]\n\n elif apt_no == '703':\n # Likely power consumption of fridge is 130-152\n fridge_ts = time_slices[(time_slices.magnitude >= 130) & (time_slices.magnitude <= 170) &\n (time_slices.type == 'power')]\n time_slices = time_slices.ix[time_slices.index - fridge_ts.index]\n\n # Likely power consumption of geyser > 2000 but on light phase > 1000\n geyser_ts = time_slices[(time_slices.magnitude > 1000) & (time_slices.type == 'light')]\n time_slices = time_slices.ix[time_slices.index - geyser_ts.index]\n\n # 26-27Nov\n if exp_no == '26-27Nov':\n washing_ts = time_slices[\n (time_slices.start_time >= 1385470967) & (time_slices.end_time <= 1385471880)]\n time_slices = time_slices.ix[time_slices.index - washing_ts.index]\n\n # 28-29Nov\n if exp_no == '28-29Nov':\n time_slices = time_slices[\n (time_slices.start_time < 1385646060) | (time_slices.end_time > 1385648143)]\n\n # Removing time slices with duration less than 30 seconds\n idx_list = []\n for idx in time_slices.index:\n start_time = time_slices.ix[idx]['start_time']\n end_time = time_slices.ix[idx]['end_time']\n magnitude = time_slices.ix[idx]['magnitude']\n\n time_diff = end_time - start_time\n\n if time_diff < 30 and magnitude < 80:\n print \"idx\", idx, \"time_diff\", time_diff, \"magnitude\", magnitude\n # Qualified for filtering it\n idx_list.append(idx)\n time_slices = time_slices.ix[time_slices.index - idx_list]\n\n return time_slices", "def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]", "def _filterTimes(self):\n print(self.tRange)\n idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & \n (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0]\n #print(self.rawD['Epoch'][:100])\n print(idT)\n # Filter data\n for key in filter(lambda x: ('Epoch' in x or \n ('Counts' in x and x[-1] == 's')), self.rawD.keys()):\n self.d[key] = self.rawD[key].copy()[idT]\n return", "def evaluate_all_relative_timexes(self, timexList, docFeatList):\n \n for timex in timexList:\n ##: Only process relative timex\n if timex.getType()!='REL' or timex.getRole()=='IGNORE': continue\n \n ###: evaluate timexes in formats of weekday (e.g. 
\"Monday\"), day # (e.g., \"day 3\", \"day three\") \n time_day_count = self.evaluate_timex_in_day_count(timex, timexList)\n if time_day_count:\n timex.setDateTime(time_day_count)\n self.timexImpactZones = self.update_timex_impact_zone_with_timex(timex, self.timexImpactZones, timexList)\n continue\n \n sentNum = timex.getSentNum()\n\n tags = []\n full_tags = self.sentence_full_tags[sentNum]\n posTimex = []\n for i, tg in enumerate(full_tags):\n ##: Label other timexes as 'unimportant' for this timex to avoid mistakes\n if tg[1]=='Timex' and tg[0]!=timex.getString():\n tags.append((tg[0], 'unimportant'))\n else:\n tags.append((tg[0], tg[1]))\n \n if tg[0]==timex.getString():\n posTimex.append(i)\n \n (search_direction, rel) = self.find_ref_time_direction(tags)\n \n if not search_direction:\n continue\n \n ##: add to clause zone after TimexSignal \n if search_direction == 'Forward':\n tpos = posTimex[0]\n signal = [tg[0] for tg in tags[max(0,tpos-1):] if tg[1]=='TimexSignal']\n if signal and signal[0] in self.clause_signals:\n sentence = self.sentences[sentNum]\n clause_start = full_tags[tpos][2] + len(full_tags[tpos][0])\n clause_end = len(sentence)\n endPos = sentence[clause_start:clause_end].find(',')\n if endPos >= 0:\n clause_end = endPos + clause_start\n self.clauseZones.append((self.sentence_startPos[sentNum] + clause_start, \n self.sentence_startPos[sentNum] + clause_end, sentNum))\n \n ref_time = self.find_relative_time_reference(search_direction, tags, timex, posTimex[0])\n if not ref_time: \n continue\n \n newtime = timexan.getRelativeDatetime(timex.string, rel, ref_time)\n \n timex.setDateTime(newtime)\n \n self.timexImpactZones = self.update_timex_impact_zone_with_timex(timex, self.timexImpactZones, timexList)\n \n for timex in timexList:\n ##: Only process relative timex\n if timex.getType()!='REL' or timex.getRole()=='IGNORE' or not timex.getDateTime(): continue\n \n self.timeReferences = self.update_time_references_with_relative_timex(timex, self.timeReferences, timexList, docFeatList)\n \n return timexList", "def filter_dt(tp,ts, show_plot, lower_delta,min_len):\n import numpy\n import matplotlib.pyplot as plt\n ####################################\n def array_none_removal(array_dt):\n output=[]\n for index in range(len(array_dt)):\n if array_dt[index]!=None:\n output.append(array_dt[index])\n return(output)\n \n ####################################\n # Calculate delta t based in P and S1 arrival\n delta_t=[]\n for gph_index in range(len(tp)):\n # verify is the pp and sp1 are NOT nan\n if tp[gph_index]!=None and ts[gph_index]!=None:\n #if (not math.isnan(tp[gph_index])) and (not math.isnan(ts[gph_index])):\n delta_t.append(ts[gph_index]-tp[gph_index])\n else:\n delta_t.append(None)\n\n ####################################\n #%% Filter delta_t\n \n # Remove Nones from delta t to calculate its stats\n dt_numbers=array_none_removal(delta_t)\n dt_numbers=list(filter(lambda x:x>0.25, dt_numbers)) # remove values lower than 0.2\n \n # Build the new delta_t array with outliers removed\n dt_filtered=[]\n \n for dt_index in range(len(delta_t)):\n if delta_t[dt_index]!= None:\n if delta_t[dt_index] <= (numpy.median(dt_numbers)+numpy.std(dt_numbers)) and delta_t[dt_index] >= (numpy.median(dt_numbers)-numpy.std(dt_numbers)) and delta_t[dt_index]>=lower_delta:\n dt_filtered.append(delta_t[dt_index])\n else:\n dt_filtered.append(None)\n else:\n dt_filtered.append(None)\n \n dt_filtered_numbers=array_none_removal(dt_filtered)\n #print('Len filtered ',len(dt_filtered_numbers))\n \n 
####################################\n if show_plot==True:# and len(dt_filtered_numbers)>=min_len: \n plt.figure(1, figsize=(8,1))\n plt.plot(dt_filtered,'.k')\n plt.axhline(y=numpy.median(dt_numbers),c='g', linestyle='--')#, lw=0.5)\n plt.axhline(y=numpy.median(dt_numbers)+numpy.std(dt_numbers),c='r', linestyle=':')#, lw=0.5)\n plt.axhline(y=numpy.median(dt_numbers)-numpy.std(dt_numbers),c='r', linestyle=':')#,lw=0.5)\n plt.ylim(0.5,1.5)\n plt.xlabel('Gph id')\n plt.ylabel('ts -tp (s)')\n plt.title('ATP ts-tp') \n plt.show()\n plt.close()\n #################################### \n return(dt_filtered,dt_filtered_numbers)", "def filter_timespans(self, minTime=2.0):\n igList = [ig.Rsc['PAIR'] for ig in self.Set if abs(float(ig.Rsc['TIME_SPAN_YEAR'])) < minTime]\n self.Set.omit(IG=igList)", "def solve_filter_time_interval(self):\n if 'interval' in self.filter_request:\n temp_list_pack = []\n temp_list_pack.append(self.list_pack[0])\n curr_time = pandas.to_datetime(self.list_pack[0]['time_stamp'])\n filter_interval = int(self.filter_request['interval'])\n\n if filter_interval <= 0:\n filter_interval = int(1)\n\n for i in self.list_pack:\n pack_time = pandas.to_datetime(i['time_stamp'])\n if (curr_time + pandas.to_timedelta(filter_interval, unit='s')) <= pack_time:\n temp_list_pack.append(i)\n curr_time = pandas.to_datetime(i['time_stamp'])\n\n self.list_pack = temp_list_pack", "def times_filter(d, times, meets_criteria=matches_timestr):\n mapping = map(type, times)\n if [ str, type(None), type(None) ] == mapping and meets_criteria(times[0]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n #return '%s' % d1\n return d1, d1, 0\n elif [ str, str, type(None) ] == mapping and meets_criteria(times[0]) and meets_criteria(times[1]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n d2 = doytimestr_to_datetime('%d:%s:00' % (d[1].year, times[1].replace('/',':')))\n #return '%s to %s' % (d1, d2)\n return d1, d2, timedelta_hours(d2-d1)\n else:\n #return ''\n return None, None, None", "def trimtimes(time, elmbeg, elmend, preft = 0.0, suft = 0.0):\n valididx = np.zeros(len(time),dtype='bool')\n \n elmbeg = elmbeg - preft\n elmend = elmend + suft\n for i in range(len(time)):\n t = time[i]\n boolbeg = t>=elmbeg\n boolend = t<=elmend\n boolelm = boolbeg & boolend\n valididx[i] = np.sum(boolelm)\n \n #To use only data outside of ELMs\n valididx = np.invert(valididx)\n return time[valididx], valididx", "def _hist_filter_ts(commands, start_time, end_time):\n for cmd in commands:\n if start_time <= cmd[1] < end_time:\n yield cmd", "def test_3dtproject_temporal_filter_wf_scrubs(self):\n\n self.wf = build_3dtproject_temporal_filter(\n bpHigh= .9, bpLow= 0.005, tr=2,\n scrub_targets=True,\n import_file=self.sample_raw_image,\n export_file=self.export_path,\n base_dir=self.test_path, crashdump_dir=self.test_path,\n mask_file=self.sample_raw_image_mask\n )\n scrub_targets = [1] * 100\n scrub_targets[46:52] = [0] * 6\n self.highlight_ranges = [(45.5, 52.5)]\n self.wf.inputs.inputnode.scrub_targets = scrub_targets", "def FilterScan(self, time_ranges, start_time, end_time, upload_time):\r\n # Always add it to total time_range\r\n self.total_time_range.AddScan(start_time, end_time,\r\n upload_time)\r\n\r\n for time_range in time_ranges:\r\n if time_range.TimeisValid(start_time):\r\n time_range.AddScan(start_time, end_time, upload_time)\r\n return\r\n\r\n logging.warning(\"Scan does not match any filters\")", "def smoothing(time, heart, pace):\n\n 
heart_filt = []\n pace_filt = []\n for ind in range(60, len(time)):\n segment = (heart[(ind-60):ind])\n if (max(segment)-min(segment)) < 15:\n print \"got one!\"\n heart_filt.append(heart[ind-30]) # TODO improvement: use the average\n pace_filt.append(pace[ind-30])\n return (heart_filt, pace_filt)", "def time_filter(target_time, format, delta_hours):\n return datetime.strptime(target_time, format) + timedelta(hours=delta_hours) >= datetime.utcnow()", "def exclude_times(self, *tuples):\n for item in tuples:\n if isinstance(item, TimeRange):\n self._excluded_times.append(item)\n else:\n self.exclude_time(*item)\n return self", "def filter_times(timestamps, time_difference):\n timestamps = sorted(set(timestamps))\n\n filtered_timestamps = []\n for current_timestamp in timestamps:\n if not filtered_timestamps or current_timestamp - filtered_timestamps[-1] > time_difference:\n filtered_timestamps.append(current_timestamp)\n\n return filtered_timestamps", "def __call__(self,time):\n \n fname = []\n tind =[]\n for t in time:\n flag=1\n for f in self.timelookup.keys():\n\n if t >= self.timelookup[f][0] and t<=self.timelookup[f][-1]:\n# print 'Found tstep %s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n tind.append(othertime.findNearest(t,self.timelookup[f][:]))\n fname.append(f)\n flag=0\n\n# if flag:\n# print 'Warning - could not find matching file for time:%s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')\n# tind.append(-1)\n# fname.append(-1)\n \n return tind, fname", "def handle_time_filter(base_case: Optional[Dict[str, Any]] = None, unit_value: Optional[str] = None,\n amount_value: Optional[int] = None, time_from: Optional[str] = None,\n time_to: Optional[str] = None) -> Dict[str, Any]:\n if (time_from or time_to) and (unit_value or amount_value):\n raise DemistoException(ERROR_TOO_MANY_ARGS)\n elif (time_from and not time_to) or (amount_value and not unit_value):\n raise DemistoException(ERROR_NOT_ENOUGH_ARGS)\n\n if unit_value:\n if amount_value:\n # amount is only for relative time - defines a window of time from a given point of time in the past until now\n if unit_value not in RELATIVE_TIME_UNIT_OPTIONS:\n raise DemistoException(ERROR_RELATIVE_TIME_UNIT)\n return {'type': 'relative', 'value': {'amount': arg_to_number(amount_value), 'unit': unit_value}}\n\n else:\n # using to_now time - represents a window of time from the start of the time unit given until now\n if unit_value not in TO_NOW_TIME_UNIT_OPTIONS:\n raise DemistoException(ERROR_TO_NOW_TIME_UNIT)\n return {'type': 'to_now', 'value': unit_value}\n\n elif time_to:\n # using absolute time\n if time_from:\n return {'type': 'absolute', 'value': {'startTime': convert_date_to_unix(time_from),\n 'endTime': convert_date_to_unix(time_to)}}\n else:\n # alert dismissal requires only an end time in the future\n return {'type': 'absolute', 'value': {'endTime': convert_date_to_unix(time_to)}}\n\n return base_case or TIME_FILTER_BASE_CASE", "def test_plt_mag_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n\n # create the arrays per filter and readout pattern\n nrsrapid_f140x, nrsrapid_f110w, nrsrapid_clear = [], [], []\n nrsrapidd6_f140x, nrsrapidd6_f110w, nrsrapidd6_clear = [], [], []\n filter_used, readout = ta.source.data['tafilter'], ta.source.data['readout']\n max_val_box, time_arr = ta.source.data['max_val_box'], ta.source.data['time_arr']\n for i, val in enumerate(max_val_box):\n if '140' in filter_used[i]:\n if readout[i].lower() == 
'nrsrapid':\n nrsrapid_f140x.append(val)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(val)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif '110' in filter_used[i]:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(val)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(val)\n nrsrapidd6_clear.append(np.NaN)\n else:\n if readout[i].lower() == 'nrsrapid':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(val)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(np.NaN)\n elif readout[i].lower() == 'nrsrapidd6':\n nrsrapid_f140x.append(np.NaN)\n nrsrapid_f110w.append(np.NaN)\n nrsrapid_clear.append(np.NaN)\n nrsrapidd6_f140x.append(np.NaN)\n nrsrapidd6_f110w.append(np.NaN)\n nrsrapidd6_clear.append(val)\n # add to the bokeh data structure\n ta.source.data[\"nrsrapid_f140x\"] = nrsrapid_f140x\n ta.source.data[\"nrsrapid_f110w\"] = nrsrapid_f110w\n ta.source.data[\"nrsrapid_clear\"] = nrsrapid_clear\n ta.source.data[\"nrsrapidd6_f140x\"] = nrsrapidd6_f140x\n ta.source.data[\"nrsrapidd6_f110w\"] = nrsrapidd6_f110w\n ta.source.data[\"nrsrapidd6_clear\"] = nrsrapidd6_clear\n result = ta.plt_mag_time()\n\n assert bokeh_plot_type == type(result)", "def _FilterMMarks(self):\n\n to_remove = []\n tplus1 = datetime.datetime.now() - datetime.timedelta(hours=1)\n\n for (i, (m1, m2)) in enumerate(self._mmarks):\n if (m1.starttime < tplus1):\n to_remove.append(i)\n\n to_remove.reverse()\n for i in to_remove:\n self._mmarks.pop(i)", "def is_absolute_timexe(string):\n patterns = [\n '(\\d+)', # just digits\n '(\\d+/\\d+/\\d+)',\n '(\\d+/\\d+)',\n '(\\d+-\\d+-\\d+)',\n '(\\d+-\\d+)',\n '^\\d{1,2}\\/\\d{1,2}\\/\\d{4}$', # matches dates of the form XX/XX/YYYY where XX can be 1 or 2 digits long and YYYY is always 4 digits long.\n \"^((([0]?[1-9]|1[0-2])(:|\\.)[0-5][0-9]((:|\\.)[0-5][0-9])?( )?(AM|am|aM|Am|PM|pm|pM|Pm))|(([0]?[0-9]|1[0-9]|2[0-3])(:|\\.)[0-5][0-9]((:|\\.)[0-5][0-9])?))$\", # Matches times seperated by either : or . will match a 24 hour time, or a 12 hour time with AM or PM specified. Allows 0-59 minutes, and 0-59 seconds. 
Seconds are not required.\n \"^ ((0[1 - 9]) | (1[0 - 2]))\\ / (\\d{2})$\"\n ]\n\n for pattern in patterns:\n if re.match(pattern, string) is not None:\n return True\n\n return False", "def test_is_valid_timestamp_valid(self):\n timestamps = (\n \"XsyRkw\",\n \"Xrim9Q\",\n \"XsyR-w\",\n \"XsySD_\",\n \"Dn9r_A\",\n )\n\n for timestamp in timestamps:\n with self.subTest(timestamp=timestamp):\n result = TokenRemover.is_valid_timestamp(timestamp)\n self.assertTrue(result)", "def filter_events_before_infection(events, admittime, infection_time, preceding_time,\n datetime_pattern=DATETIME_PATTERN, time_key=\"charttime\"):\n admittime_datetime = datetime.strptime(admittime, datetime_pattern)\n infection_datetime = datetime.strptime(infection_time, datetime_pattern) - timedelta(hours=preceding_time)\n new_events = []\n for event in events:\n # Pega a data do evento e o transforma em datetime\n event_datetime = datetime.strptime(event[time_key], datetime_pattern)\n # Compara se o evento aconteceu entre a data de adimissão e a data de infecção (já alterada)\n if event_datetime > admittime_datetime and event_datetime <= infection_datetime:\n new_events.append(event)\n return new_events", "def test_overlap():\n events = [['Event', '2017-11-21T10:00:00-08:00', '2017-11-21T11:00:00-08:00'],\n ['Event', '2017-11-21T10:30:00-08:00', '2017-11-21T11:20:00-08:00']]\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 10:00 am.',\n 'Tue, Nov 21, 11:20 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 5:00 pm.',\n 'Sat, Nov 25, 9:00 am to Sat, Nov 25, 5:00 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.']", "def create_tlinks_from_timexes(self, timexes, sentence):\n \n if not timexes:\n return []\n \n if len(timexes)==1:\n return [TLink(timexes[0])]\n \n timexStrs = [t.getString() for t in timexes]\n sentence = sentence.lower()\n tokens, indices = util.tokenize_with_reserved_strings(sentence, timexStrs)\n numTK = len(tokens)\n \n tlinkList = []\n tlink = TLink(timexes[0])\n nn = range(1, len(timexes))\n for ind in nn:\n if indices[ind]-indices[ind-1]==2:\n midword = tokens[indices[ind]-1]\n #########################################################\n ###: Search for case: on xxx or xxx\n #########################################################\n if midword=='or':\n tlink.addTimex(timexes[ind])\n tlink.setType('OR')\n timexes[ind].setRole('IGNORE')\n continue\n \n #########################################################\n ###: Search for case: from xxx to xxx, between xxx and xxx\n #########################################################\n isTimeRange = False\n if tokens[indices[ind]-1] in ['to', 'until', 'till']:\n isTimeRange = True\n if not isTimeRange and indices[ind-1]>0 and midword=='and' and tokens[indices[ind-1]-1]=='between':\n isTimeRange = True\n \n if isTimeRange:\n tlink.addTimex(timexes[ind])\n tlink.setType('BETWEEN')\n continue\n \n #########################################################\n ###: Search for case: 5/12-15/2014 or May/12-15/2014, where two timexes share the same string\n #########################################################\n if timexes[ind].getStartPos() == timexes[ind-1].getStartPos():\n tlink.addTimex(timexes[ind])\n tlink.setType('BETWEEN')\n 
continue\n \n #########################################################\n ###: Search for case: 5/12-15/2014 or May/12-15/2014, where two timexes share the same string\n #########################################################\n if timexes[ind].getType()=='DATE' and timexes[ind-1].getType()=='DATE' and tokens[indices[ind]-1]=='-':\n tlink.addTimex(timexes[ind])\n tlink.setType('BETWEEN')\n continue\n \n #########################################################\n ###: Search for case: 5/12/14, 5/13/14, and 5/16/14; or case: 5/12/14 and on 5/13/14\n #########################################################\n if timexes[ind].getType()=='DATE' and timexes[ind-1].getType()==timexes[ind].getType() and (\n (indices[ind]-indices[ind-1]==2 and tokens[indices[ind]-1] in [',', 'and', '&']) \n or (indices[ind]-indices[ind-1]==3 and tokens[indices[ind]-2]==',' and tokens[indices[ind]-1] in ['and' ,'&'])\n or (indices[ind]-indices[ind-1]==3 and tokens[indices[ind]-2]=='and' and tokens[indices[ind]-1]=='on') \n or (indices[ind]-indices[ind-1]==3 and tokens[indices[ind]-2]==',' and tokens[indices[ind]-1]=='on')): \n tlink.addTimex(timexes[ind])\n tlink.setType('MULTIPLE')\n continue\n \n #########################################################\n ###: Search for the case: ...5/12/2014, 3 days after vaccination, ...; ...5/12/2014, less than 3 days after vaccination, ...; \n ##: or ...5/12/2014, after 3 days of vaccination, ...\n #########################################################\n if timexes[ind-1].getType()=='DATE' and timexes[ind].getType()=='REL':\n if (indices[ind]-indices[ind-1]==2 and tokens[indices[ind]-1] ==',') \\\n or (indices[ind]-indices[ind-1]==3 and tokens[indices[ind]-1] =='after' and tokens[indices[ind]-2] ==',') \\\n or (indices[ind]-indices[ind-1]<=4 and indices[ind]+1<numTK and tokens[indices[ind]+1] =='after' \\\n and tokens[indices[ind-1]+1] ==',') and tokens[indices[ind]-1] !='and':\n tlink.addTimex(timexes[ind])\n tlink.setType('ASSOCIATE')\n timexes[ind].setRole('IGNORE')\n continue\n \n ##: Start a new TLink\n tlinkList.append(tlink)\n tlink = TLink(timexes[ind])\n \n tlinkList.append(tlink)\n \n return tlinkList", "def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items", "def time_unwrap(val_timestamps):\n a=val_timestamps.shape[0]\n val_time =val_timestamps.astype('int64')\n for i in range(a-1):\n if val_time[i+1]-val_time[i]<-1*2**25:\n val_time[i+1:]+=2**26\n\n return(val_time)", "def test_parse_time_with_invalid_interval(self):\n now = datetime(2015, 2, 1, 0, 0, 0)\n self.assert_TPVE(parse_time, \"-0\", now)\n self.assert_TPVE(parse_time, \"-12\", now)\n self.assert_TPVE(parse_time, \"-12fortnights\", now)\n self.assert_TPVE(parse_time, \"-20150101\", now)", "def timeviewUrls(pattern, view, kwargs=None, name=None):\n results = [(pattern, view, kwargs, name)]\n tail = ''\n mtail = re.search('(/+\\+?\\\\*?\\??\\$?)$', pattern)\n if mtail:\n tail = mtail.group(1)\n pattern = pattern[:len(pattern) - len(tail)]\n for filter in ('/(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})/' + \\\n 
'(?P<hour>\\d\\d)-(?P<minute>\\d\\d)',\n '/(?P<year>\\d{4})-(?P<month>\\d{2})-(?P<day>\\d{2})'):\n results += [(pattern + filter + tail, view, kwargs)]\n return results", "def filter_patterns(self,threshold):\n if threshold is not None:\n pass #learn threshold\n return filter(lambda pattern: pattern.score > threshold, self.patterns)" ]
[ "0.57943213", "0.57883453", "0.56736153", "0.55306923", "0.5493193", "0.54850394", "0.5452034", "0.5439739", "0.53424335", "0.5314489", "0.5308615", "0.52857965", "0.5283658", "0.5208818", "0.5194075", "0.5149346", "0.5143365", "0.50963813", "0.50858736", "0.50754476", "0.5069659", "0.5054594", "0.50200325", "0.50059474", "0.49993616", "0.49736708", "0.4972875", "0.49713618", "0.49679625", "0.49629045" ]
0.6755016
0
check if two trees are structurally identical
def is_identical(self, tree1, tree2): if not tree1 and not tree2: return True elif tree1 and tree2: return (tree1.root == tree2.root and self.is_identical(tree1.left,tree2.left) and self.is_identical(tree1.right, tree2.right)) else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _exact_compare(tree1, tree2):\n attrs = ['name', 'length', 'support']\n for n1, n2 in zip(tree1.postorder(), tree2.postorder()):\n for attr in attrs:\n if getattr(n1, attr, None) != getattr(n2, attr, None):\n return False\n return True", "def test_deep_equals(obja, objb, isequal):\n\n objatree = wo.typedtree(obja)\n objbtree = wo.typedtree(objb)\n match = objatree == objbtree\n ok = match == isequal\n\n if ok:\n s = \"pass\"\n else:\n s = \"fail\"\n\n print(f\"{obja} == {objb} is {match} : {s}\")\n return ok", "def _node_equal(self, other):\n # We're not equal if other isn't a Node, or if other is a different class.\n if not isinstance(other, Node) or not isinstance(other, self.__class__):\n return False\n # Loop through all children, checking whether they are equal\n for self_child, other_child in zip(self.getChildren(), other.getChildren()):\n if not self_child == other_child:\n return False\n # If we get here, our two nodes much be equal\n return True", "def compare_topology(tree1, tree2):\n n2p1, n2p2 = ({node.name: node.parent.name\n for node in tree.traverse() if not node.is_root()}\n for tree in (tree1, tree2))\n return n2p1 == n2p2", "def __eq__(self, other):\n return type(self) == type(other) and self.node is other.node", "def isSameTree(self, node1, node2):\n # print(\"isSameTree call for {} and {}\".format(node1.id, node2.id))\n\n if node1.id == node2.id:\n return True\n if node1.value == node2.value:\n # Compare children, in sorted order based on value\n node1Children = list(\n sorted(\n node1.neighbors,\n key=lambda node:\n node.value))\n node2Children = list(\n sorted(\n node2.neighbors,\n key=lambda node:\n node.value))\n\n if len(node1Children) == len(node2Children):\n # For identical trees, A list of nieghbors\n # in sorted (based on value) order:\n # Should have same length\n # At each position, values are same (verify recursively)\n for i in range(len(node1Children)):\n if not self.isSameTree(node1Children[i], node2Children[i]):\n return False\n # All neighbor pairs verified\n return True", "def __eq__(self, other: 'Tree') ->bool:\n return (type(self) is type(other) and\n self.value == other.value and\n self.children == other.children)", "def are_two_trees_incompatible(tree1, tree2):\n leaves1 = get_leaf_set(tree1)\n leaves2 = get_leaf_set(tree2)\n shared = list(leaves1.intersection(leaves2))\n\n taxa = dendropy.TaxonNamespace(shared) # CRITICAL!!!\n\n # No topological information\n if len(shared) < 4:\n return False\n\n # Move trees onto shared leaf set\n tree1.retain_taxa_with_labels(shared)\n tree1.migrate_taxon_namespace(taxa)\n tree1.is_rooted = False\n tree1.collapse_basal_bifurcation()\n tree1.update_bipartitions()\n\n tree2.retain_taxa_with_labels(shared)\n tree2.migrate_taxon_namespace(taxa)\n tree2.is_rooted = False\n tree2.collapse_basal_bifurcation()\n tree2.update_bipartitions()\n\n # Check for compatibility\n [fp, fn] = false_positives_and_negatives(tree1, tree2)\n if fp > 0 or fn > 0:\n return True\n else:\n return False", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 
0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def __eq__(self, other: RBtree) -> bool:\n comp = lambda n1, n2: n1 == n2 and ((comp(n1.left, n2.left) and comp(n1.right, n2.right)) if (n1 and n2) else True)\n return comp(self.root, other.root) and self.size == other.size", "def _equivalent_data_structures(struct_1, struct_2):\n if isinstance(struct_1, np.ndarray):\n return np.allclose(struct_1, struct_2)\n if isinstance(struct_1, Mapping):\n if set(struct_1.keys()) != set(struct_2.keys()):\n return False\n return all(\n _equivalent_data_structures(struct_1[key], struct_2[key])\n for key in struct_1)\n if isinstance(struct_1, Sequence):\n if len(struct_1) != len(struct_2):\n return False\n return all(\n _equivalent_data_structures(value_1, value_2)\n for value_1, value_2 in zip(struct_1, struct_2))\n if isinstance(struct_1, Number):\n return math.isclose(struct_1, struct_2)\n return False", "def compareTree(t1, t2):\n \n reorderTree(t1)\n reorderTree(t2)\n\n return compareTreeHelper(t1, t2)", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def __eq__(self, other):\n return type(other) is type(self) and other._node is self._node", "def is_same(self: _R, other: _R) -> bool:\n children = [i.render() for i in self.children]\n other_children = [i.render() for i in other.children]\n return other_children == children", "def __eq__(self, other):\n if not isinstance(other, Tree):\n raise ValueError('Cannot compare objects.')\n\n if self.is_leaf():\n if other.is_leaf():\n return True\n else:\n return False\n\n if other.is_leaf():\n return False\n\n if (self.left_subtree == other.left_subtree and self.right_subtree == 
other.right_subtree) \\\n or (self.left_subtree == other.right_subtree and self.right_subtree == other.left_subtree):\n return True", "def test_equivalency(self):\n def compare_func(obj, node):\n # same id\n self.assertEqual(obj.id, node.get(\"id\"))\n\n # same html\n self.assertEqual(obj.html.prettify, node.prettify)\n\n # parents have same id (only for non-root elements)\n if not obj == self.document.root:\n self.assertEqual(obj.parent.id, node.parent.get(\"id\"))\n\n # same number of children\n child_nodes = self.get_children_of_node(node)\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # children have same ids\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n self.assertEqual(child_obj.id, child_node.get(\"id\"))\n\n self.recursively_compare_tree_against_html(compare_func)", "def __eq__(self, other) -> bool:\n if not isinstance(other, self.__class__):\n return False\n\n if self.number_of_nodes() != other.number_of_nodes():\n return False\n if self.number_of_edges() != other.number_of_edges():\n return False\n\n if list(self.nodes) != list(other.nodes):\n return False\n\n # Compare node data.\n for i in self.nodes:\n # We may want to exclude the 'name' attribute from comparisons, assuming\n # it has no logical meaning.\n if self.nodes[i] != other.nodes[i]:\n return False\n\n if list(self.edges) != list(other.edges):\n return False\n\n for i, j in self.edges:\n # Compare edge data.\n if self.edges[i, j] != other.edges[i, j]:\n return False\n\n return True", "def nodes_are_equal(node1, node2):\n\n try:\n return dump_ast(node1).strip() == dump_ast(node2).strip() and \\\n node1.lineno == node2.lineno and \\\n node1.col_offset == node2.col_offset\n except:\n return False", "def __eq__(self, other):\n if not isinstance(other, Node):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: 'RegexTree', other: object) -> bool:\n return (isinstance(other, RegexTree) and\n self.symbol == other.symbol and\n self.children == other.children)", "def compare_trees(tree1, tree2):\n for key in tree1.keys():\n print(key)\n assert key in tree2.keys()\n if isinstance(tree1[key], list):\n print(tree1[key])\n assert tree1[key] == tree2[key]\n else:\n print('Calling compare_trees recursively')\n compare_trees(tree1[key], tree2[key])", "def isSameTree(self, p, q):\n # Initialize two queues for each tree with root nodes\n q1, q2 = deque([p]), deque([q])\n\n # Loop while q1 and q2 len(q1) == len(q2)\n while (q1 and q2) and (len(q1) == len(q2)):\n p1 = q1.popleft() # -> None\n p2 = q2.popleft() # -> None\n\n # Check for left and right child for each tree\n if (p1 and p2) and (p1.val == p2.val):\n # If the values are the same, put them in q1, q2\n q1.extend([p1.left, p1.right])\n q2.extend([p2.left, p2.right])\n elif (not p1 and not p2):\n continue\n else:\n # If the values are not the same, return False\n return False\n\n # Return True\n return True", "def __eq__(self, other):\n return other.left == self.left and other.right == self.right", "def compare_trees(tree1, tree2):\n \tresponse = {}\n \tstart_time = time.time()\n \ttry:\t\n \t\ttns = dendropy.TaxonNamespace() \t\n \t\n \t\ttree_obj1 = dendropy.Tree.get(data=tree1, schema=\"newick\",taxon_namespace=tns)\n \t\ttree_obj2 = dendropy.Tree.get(data=tree2, schema=\"newick\",taxon_namespace=tns)\n\n \t\ttree_obj1.encode_bipartitions()\n \t\ttree_obj2.encode_bipartitions()\n\n \t\t#-----------------------------------------------------------\n \t\t#This method returns the symmetric distance between two trees. 
\n \t\t#The symmetric distance between two trees is the sum of the number of splits found in one of the trees but not the other. \n \t\t#It is common to see this statistic called the Robinson-Foulds distance\n\n \t\tareSame = True if treecompare.symmetric_difference(tree_obj1, tree_obj2) == 0 else False\n \t\tstatus = 200\n \t\tmessage = \"Success\"\n \t\tresponse['are_same_tree'] = areSame\n \n \texcept Exception, e:\n \t\tif \"Incomplete or improperly-terminated tree statement\" in str(e): #invalid: \"((A,B),C,D));\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderIncompleteTreeStatementError: \" + str(e)\n \t \t\tstatus = 400\n \t\telif \"Unbalanced parentheses at tree statement\" in str(e): #invalid: \"((A,B),(C,D);\" valid: ((A,B),(C,D)); \n \t\t\tmessage = \"NewickReaderMalformedStatementError: \"+str(e) \n \t \t\tstatus = 400\n \t\telif \"Multiple occurrences of the same taxa\" in str(e): #invalid: \"((A,B),(C,C));\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"NewickReaderDuplicateTaxonError: \"+str(e)\n \t \t\tstatus = 400\n \t\telif \"Unexpected end of stream\" in str(e): # invalid: \"((A,B),(C,D))\" valid: ((A,B),(C,D));\n \t\t\tmessage = \"UnexpectedEndOfStreamError: \"+str(e)\n \t \t\tstatus = 400\n \t\telse:\n \t\t\tmessage = \"Error: Failed to compare trees. \"+str(e)\n \t \t\tstatus = 500\n \t \t\n \tresponse['status_code'] = status\n \tresponse['message'] = message\n\n \tend_time = time.time()\n \texecution_time = end_time-start_time\n #service result creation time\n \tcreation_time = datetime.datetime.now().isoformat()\n \tmeta_data = {'creation_time': creation_time, 'execution_time': float('{:4.2f}'.format(execution_time)), 'source_urls':[\"http://dendropy.org/library/treecompare.html#module-dendropy.calculate.treecompare\"] }\n\n \tresponse['meta_data'] = meta_data\n \tprint response\n \treturn response" ]
[ "0.76219094", "0.719566", "0.7192063", "0.7131326", "0.7116567", "0.71084994", "0.70945626", "0.7080383", "0.70304936", "0.70223695", "0.7013674", "0.6955322", "0.69210637", "0.69146246", "0.68956894", "0.68956894", "0.68956894", "0.68956894", "0.68956894", "0.6876918", "0.68448526", "0.6822652", "0.68186593", "0.67851853", "0.6702377", "0.66775274", "0.66774064", "0.6634364", "0.66307783", "0.6627449" ]
0.75384027
1
count no of half nodes via level order traversal
def count_half_nodes(self): queue = [self] half_nodes = 0 half = False while queue: curr_node = queue.pop(0) if curr_node.left: queue.append(curr_node.left) half = not half if curr_node.right: queue.append(curr_node.right) half = not half if half: half_nodes += 1 half = not half return half_nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ht(node):\n n = 0\n while node: n, node = n+1, node.left\n return n", "def count_nodes(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 1 + self.get_left().count_nodes() + self.get_right().count_nodes()\n else:\n return 1 + self.get_left().count_nodes()\n else:\n return 1 + self.get_right().count_nodes()", "def _children_count(self):\n cnt = 0\n if self.left:\n cnt += 1\n if self.right:\n cnt += 1\n return cnt", "def numNodes(T):\r\n n = 1\r\n if T.isLeaf:\r\n return n\r\n for i in range(len(T.child)):\r\n n += numNodes(T.child[i])\r\n return n", "def node_count(self):\n if self.value:\n cnt = 0\n else:\n left_cnt = self.left.node_count()\n right_cnt = self.right.node_count()\n cnt = 1 + left_cnt + right_cnt\n return cnt", "def nbr_nodes(tree_depth):\n return 2**(tree_depth+1)-1", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n# s = 0\n# for child in T:\n# s += leaf_count(child)\n# return s\n return reduce(add, map(leaf_count, T))", "def count_leaf(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 0 + self.get_left().count_leaf() + self.get_right().count_leaf()\n else:\n return 0 + self.get_left().count_leaf()\n else:\n return 0 + self.get_right().count_leaf()", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def leaf_count(self) -> int:\n if self.children == []:\n return 1\n else:\n return sum([x.leaf_count() for x in self.children])", "def count(self):\r\n return self.count_helper(self.top_node)", "def num_children(self, n):\n counter = 0\n if self.left(n):\n counter += 1\n if self.right(n):\n counter += 1\n return counter", "def _get_level(self, node):\n level = 0\n while node.parent:\n node = node.parent\n level += 1\n return level", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n s = 0\n for child in T:\n s += leaf_count(child)\n return s\n # Can you put the else clause in one line instead?\n return functools.reduce(operator.add, map(leaf_count, T), 0)", "def countNodes(epr):\n result = 1\n argLst = epr.args\n for arg in argLst:\n result += countNodes(arg)\n return result", "def countNodes(self, root):\n\n\n if not root:\n return 0\n\n return 1+self.countNodes(root.left)+self.countNodes(root.right)", "def complexity(self, mode='#nodes'):\n if mode == '#nodes':\n return len(self.nodes)", "def count_nodes(self):\n if self.children is None:\n return 0\n\n total_count = 0\n for child in self.children:\n if child is None:\n return 0\n child_count = child.count_nodes()\n total_count = total_count + child_count\n\n return total_count+1", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def count(self):\n\t\treturn len(list(self.nodes))", "def number_nodes(tree):\n def list_of_nodes(tree):\n \"\"\"Return a list of internal nodes in postorder traversal\n\n @param HuffmanNode tree: a tree to traverse\n @rtype: list\n\n >>> t = HuffmanNode(None, HuffmanNode(6), HuffmanNode(7))\n >>> list_of_nodes(t) == [t]\n True\n >>> t = HuffmanNode(None, HuffmanNode(8), HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)))\n >>> list_of_nodes(t) == [HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)), HuffmanNode(None, HuffmanNode(8), HuffmanNode(None, HuffmanNode(5), 
HuffmanNode(6)))]\n True\n \"\"\"\n list_ = []\n if tree.left.is_leaf() and tree.right.is_leaf():\n list_.append(tree)\n return list_\n elif tree.left.is_leaf():\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_\n elif tree.right.is_leaf():\n list_.extend(list_of_nodes(tree.left))\n list_.append(tree)\n return list_\n else:\n list_.extend(list_of_nodes(tree.left))\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_\n\n internal_nodes = list_of_nodes(tree)\n for i in range(len(internal_nodes)):\n node = internal_nodes[i]\n node.number = i", "def height(node):\r\n \r\n height = 0\r\n temp = node\r\n while temp != None:\r\n temp = temp.parent\r\n height += 1\r\n return height", "def leaf_count(t: Tree) -> int:\n if t.children == []:\n return 1\n else:\n return sum([leaf_count(child) for child in t.children])", "def __len__(self):\n return len(self.subtrees())", "def numNodes(self):\n res = 0\n for n in self.iternodes():\n res += 1\n return res", "def get_n_leaves(clf):\n leaves = clf.tree_.children_left == -1\n leaves = np.arange(0,clf.tree_.node_count)[leaves]\n return len(leaves)", "def count_helper(self, node: object) -> int:\n if self.leaf(node): # If current node is a leaf\n return 1\n\n # Cases if current node has a single child\n if node.left is not None and node.right is None:\n return self.count_helper(node.left)\n\n if node.left is None and node.right is not None:\n return self.count_helper(node.right)\n\n # Case where current node has two children\n return self.count_helper(node.left) + self.count_helper(node.right)", "def size(node):\n\t\tif node is None:\n\t\t\treturn 0\n\t\treturn 1+BST.size(node.left)+BST.size(node.right)", "def children_num(self,p):\n counter = 0\n for child in self.children(p):\n counter += 1\n return counter", "def node_count(self):\n return self._root.count()" ]
[ "0.7206602", "0.70298666", "0.6988069", "0.6873926", "0.6840664", "0.67481023", "0.674637", "0.66773266", "0.6636227", "0.6613095", "0.65921193", "0.65709895", "0.656669", "0.65418726", "0.6516444", "0.6488223", "0.6473618", "0.64561695", "0.64550674", "0.6419698", "0.6414584", "0.6412171", "0.64065737", "0.6402799", "0.63927585", "0.63746166", "0.63024056", "0.6278398", "0.62538123", "0.62462616" ]
0.7208616
0
A metaphorical superclass for various card factory functions.
def card_factory(rank,suit): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def card(self):\r\n return Card(self)", "def card(self):\r\n return Card(self)", "def mock_card():\n return Card(Suit.SPADE, 1)", "def make_card(cls, suit, pip):\n return Card(suit, pip)", "def card(self, card_id):\r\n return Card(self, card_id)", "def __init__(self):\n self._cards = []\n #Add a single card for each suit and rank\n for suit in Card.SUITS:\n for rank in Card.RANKS:\n c = Card(rank, suit)\n self._cards.append(c)", "def define_card(card):\n try:\n value = define_card_value(card[0])\n color = define_card_color(card[1])\n return Card(value, color)\n except AttributeError:\n pass", "def __init__(self, this_card_name, this_card_points, this_card_suit):\n self.card_name = this_card_name\n self.card_points = this_card_points\n self.card_suit = this_card_suit", "def __init__(self, cards):\n self.cards = cards", "def card(self, card_id_or_shortlink):\r\n return Card(self, card_id_or_shortlink)", "def __init__(self):\n self.deckcards = []\n for suit_by_number in range(4):\n for rank_by_number in range(1, 14):\n card = card_create.Createcard(suit_by_number, rank_by_number)\n self.deckcards.append(card)", "def __init__(self):\n self.cards = [Card(face=card[0], value=card[1], suit=suit)\n for card in CARD_VALUES().items() for suit in CARD_SUITS()]", "def __init__(self, cardname, amount):\n self.cardname = str(cardname)\n self.amount = int(amount)", "def __init__(self):\n self.deck = []\n for n in range(1, 14):\n card1 = Card(n, \"diamond\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"spade\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"heart\")\n self.deck.append(card1)\n\n for n in range(1, 14):\n card1 = Card(n, \"club\")\n self.deck.append(card1)", "def cards(self):\r\n return Cards(self)", "def _cards_getter(self):\n pass", "def __init__(self, deck_type=\"standard\"):\n if deck_type == \"standard\":\n self.card_values = {\n \"ace\": 1,\n \"two\": 2,\n \"three\": 3,\n \"four\": 4,\n \"five\": 5,\n \"six\": 6,\n \"seven\": 7,\n \"eight\": 8,\n \"nine\": 9,\n \"ten\": 10,\n \"jack\": 10,\n \"queen\": 10,\n \"king\": 10\n }\n self.cards = []\n self.fill_standard_deck()\n else:\n raise Exception(\"Only standard deck type is supported right now.\")", "def __init__(self, suit_rank):\n self.suit = suit_rank[0]\n self.rank = suit_rank[1]\n self.name, self.values = self._translate_card()\n self.image_path = \"\"\n self.image_obj = None", "def __init__ ( self ):\n \n self.__deck = []\n \n for i in range(0,7):\n self.__deck.append('1')\n \n for i in range(0,10):\n self.__deck.append('2')\n \n for i in range(0,3):\n self.__deck.append('3')\n \n #appends the event cards using the first 3 letters of the card in all caps\n self.__deck.append('SEA')\n self.__deck.append('HER')\n self.__deck.append('VIC')\n self.__deck.append('PIL')\n self.__deck.append('TRU')", "def __init__(self):\n # start with the wild cards\n self.pile = [Card.wild_card(), Card.wild_card()]\n for i in range(Card.num_values):\n for j in range(Card.num_colors):\n for k in range(Card.num_shapes):\n self.pile.append(Card(i + 1, j + 1, k + 1, False))\n assert len(self.pile) == Pile.num_cards\n shuffle(self.pile)", "def __init__(self, card_one, from_split=False, player=None, **kwargs):\n if isinstance(card_one, Card):\n self.card_one = card_one\n else:\n raise TypeError(\"'card_one' must be a Card object.\")\n\n if player:\n if isinstance(player, Player):\n self.player = player\n else:\n raise TypeError(\"'player' must be a Player object.\")\n self.wager = 
self.player.wager(**kwargs)\n self.cards = [card_one]\n self.split = False\n self.soft = card_one.rank == 14\n self.stand = False\n self.bust = False\n self.blackjack = False\n self.from_split = from_split\n self.insurance = False\n self.total = card_one.value\n self.surrender = False\n self.double_down = False\n # this is used to determine whether to add 11 or 1 when delt an ace\n self.non_ace_total = 0\n self.num_aces = 1 * self.soft\n self.num_hard_aces = self.num_aces", "def factory_method(self):\n pass", "def factory_method(self):\n pass", "def __init__(self, n):\r\n\t\tif n >= 52:\r\n\t\t\traise Exception(\"This card does not exist\")\r\n\t\tself.n = n", "def __init__(self):\r\n self.__suit_dict = [{\"Diamonds\": 1}, {\"Spades\": 2}, {\"Harts\": 3}, {\"Clubs\": 4}]\r\n self.cards_list = []\r\n for suit in self.__suit_dict:\r\n for value in range(1, 14):\r\n self.cards_list.append(Card(suit, value))\r\n self.Shuffle()", "def test_constructor(self):\n hand = Hand([Card(\"A\", \"D\")])\n assert isinstance(hand, Hand)", "def __init__(self, cards = []):\n self.cards=cards", "def __init__(self,card_name):\n self.mw_card=Cards.Card(card_name)\n self.info=self.mw_card.info\n for key,value in self.info.items():\n self[key]=value\n\n dict.__init__(self.info)\n self.check_info()\n #assign special value\n self.nb_event=int(self.info['mw_run']['2'])\n self.nb_card=self.number_of_P_run()\n self.check_condor()\n self.name=self.take_run_name()\n self.P_listdir,self.MW_listdir=self.detect_SubProcess()\n self.init_run_opt()\n self.def_actif_param()", "def __init__(self, requester: NSRequester, cardid: int, season: str) -> None:\n # Kinda messy here. We could pass `\"cardid\"` and `cardid` as the api and name,\n # but still need to inject season somehow, and it would probably be more confusing\n # if the two were split up, so both are jammed in the overridden _key method\n # Should API be extended to handle multiple keys? maybe, maybe not.\n super().__init__(requester, \"card\", \"\")\n\n self.id = cardid\n self.season = season\n\n # The maximum number of trades returned by the trade shard in one request\n self.tradeResponseLimit = 50", "def __init__(self, cards, stack_style=SQUARED):\n self.cards = cards\n self.stack_style = stack_style" ]
[ "0.7251714", "0.7251714", "0.6682919", "0.65862644", "0.6558133", "0.6534848", "0.6505554", "0.6370556", "0.6358612", "0.6308622", "0.6301257", "0.62498856", "0.62281394", "0.6223822", "0.62192464", "0.6216633", "0.6190043", "0.61669254", "0.6140639", "0.6122692", "0.60871726", "0.60764295", "0.60764295", "0.6070503", "0.606069", "0.60125315", "0.59746474", "0.5929533", "0.5923363", "0.5917192" ]
0.7608941
0
This method import all data files and collect them in one big dictionary, every key is a cell, and contains the data for this cell for all time_points
def createDictBase(self): #allFiles = glob.glob(self.path + "/*"+ self.filetype) #data = pd.read_excel(allFiles[0]) #================================================================================================================== # self.list_files = self.Files_to_import() # data=pd.read_excel(self.path +'/'+self.list_files[0]) # importing the first excel sheet from the first/zero time point self.list_files = self.Files_to_import() try: tim = pd.read_excel(self.path +'/timePoints' + self.filetype) # importin the time points from a shhet called time_points time = np.array(tim['time']) # assigning variable time conataing an array with the timepoints self.nr_files = len(time) except: time = np.array(list(range(self.nr_files))) data=pd.read_excel(self.path +'/'+self.list_files[0]) data=np.array(data) # converts it to array, so we can manipualte the data easier #python wants for some reason first to create the dictionary with at least on value before we can run it in a loop. THat is why we have litle redundancy, since the next part is allmost the same. for i in range(len(data)): # the numbers of rows. Goes through the rows for ii in range(len(data[i])): # the numbers of columns. For every row goes through the columns cell_id=str(i)+str(ii) # we create a variable that has a value cell_id= rowNUm colNUm, for example x= '34' means row 3 column 4 dat=[] # a list that will contain the first value of the cell. It will be cleaned every time the loop runs the newxt value dat.append(data[i][ii]) # we put the value of the well to the list self.dbase[cell_id]=dat # the list is put to the table. For example dabse['cell_id']= some OD value # then we go through the rest of the excell time points and collect them for i in range(1,len(time)): if self.list_files[i] != 0: #data = pd.read_excel(allFiles[i]) data=pd.read_excel(self.path +'/'+ self.list_files[i]) data=np.array(data) for i in range(len(data)): # the numbers of rows. Goes through the rows for ii in range(len(data[i])): # the numbers of columns. For every row goes through the columns cell_id=str(i)+str(ii) # we create a variable that has a value cell_id= rowNUm colNUm, for example x= '34' means row 3 column 4 tempVar=self.dbase[cell_id] # here we use a method of exchanging variables to be able to uppdate the cloumn corresponding to the cell_id tempVar.append(data[i][ii]) # add the new data to the copy self.dbase[cell_id] = tempVar # uppdate the original dictionary else: pass self.dbase['time'] = time # at theend we add a column that takes care of the time_points return self.dbase
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self):\n\n all_data = OrderedDict()\n projects = [Path(proj) for proj in glob(str(self.data_path.joinpath(\"*\"))) if Path(proj).is_dir()]\n\n for project in projects:\n files = []\n \n # Read all csv files and save them as a list in files\n for ver in glob(str(project.joinpath(\"*.csv\"))):\n files.extend(pd.read_csv(ver, usecols=['time', 'buggy']).values.tolist())\n \n # Create a pandas dataframe from the csv sorted by datetime\n df = pd.DataFrame(files, columns=['Time', 'Bugs']).sort_values(by='Time').reset_index(drop=True)\n \n # Convert time to Pandas DateTime format\n df['Time'] = pd.to_datetime(df['Time']) \n \n # Group bug counts by week starting on monday\n df = df.reset_index().set_index('Time').groupby(\n [pd.Grouper(freq='W-MON')])[\"Bugs\"].sum().astype(int).reset_index()\n \n df = df.set_index('Time')\n # Save the data to dictionary\n all_data.update(OrderedDict({project.name: df}))\n\n return all_data", "def organize_data(path_dir, accelerometer_file, accelerometer_data):\n\n accelerometer_df = pd.read_csv(os.path.join(path_dir, accelerometer_file), usecols=['UTC time', 'x', 'y', 'z'])\n\n x_list = accelerometer_df['x']\n y_list = accelerometer_df['y']\n z_list = accelerometer_df['z']\n UTC_times_list = accelerometer_df['UTC time']\n\n x_y_z_list_for_hour = [] # will contain 60*60 values, that every value is [x,y,z]\n\n curr_line_index = 0\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n for i in range(60):\n for j in range(60):\n if (curr_date_time.minute != i or curr_date_time.second != j) or curr_line_index + 1 == len(UTC_times_list): # the curr time is more or little then the wanted time, or we finished all the lines in the file --> there is a need to fulfill the values with 0,0,0\n continue\n else:\n x_y_z_list_for_hour.append([x_list[curr_line_index], y_list[curr_line_index], z_list[curr_line_index]])\n while curr_date_time.minute == i and curr_date_time.second <= j and curr_line_index + 1 != len(UTC_times_list):\n curr_line_index += 1\n curr_date_time = get_date_time_from_UTC_time(UTC_times_list[curr_line_index])\n date = get_date_from_file_name(accelerometer_file)\n hour = curr_date_time.hour\n if date not in accelerometer_data.data_dic:\n accelerometer_data.data_dic[date] = {}\n accelerometer_data.data_dic[date][hour] = x_y_z_list_for_hour", "def _read_trajectory_files(self):\n dflist = []\n self.Ntimes = {}\n for downD in self.case.downstreamD:\n outputs = self.case.get_outputs(self.method,downD)\n print(outputs['trajectory_file'])\n df = pd.read_csv(outputs['trajectory_file'],\n header=None,\n usecols=[0,1,2])\n df.columns = ['t','y','z']\n df['x'] = downD * self.case.turbine.D\n df['z'] -= self.case.turbine.zhub\n df = df.set_index(['t','x'])[['y','z']]\n self.Ntimes[downD] = len(df.index.levels[0])\n dflist.append(df)\n self.df = pd.concat(dflist).sort_index()", "def makeDataDict(filenames):\n dictionary = defaultdict(list)\n for i, filename in enumerate(filenames):\n print('{}. 
working with {}, please wait...'.format(i, filename.decode('utf-8')))\n # calling csv2dict(), to convert the csv file to dictionary\n data = helper_functions.csv2dict('Data/csvfiles/' + str(filename.decode('utf-8')))\n # accessing the key aftershocksyn to check for unique values, similar like (set(list[1, 1, 0, 2, 3])) -> outputs [1, 1, 0, 2, 3]\n grid_aftershock_count = np.double(data['aftershocksyn'])\n # no use of if\n #if len(np.unique(grid_aftershock_count)) < 2:\n # continue\n temp = grid_aftershock_count.tolist()\n # adding a (key, value) to the testingSet\n dictionary['aftershocksyn'].extend(temp)\n # now adding remaining columns\n for column in columns:\n dictionary[column].extend(np.double(data[column]))\n\n return dictionary", "def load_data(path_to_data, raw_times):\n\n loaded_data = {}\n file_names = [\n \"fore_train_ip\",\n \"fore_valid_ip\",\n \"train_ip\",\n \"valid_ip\",\n \"test_ip\",\n \"fore_train_op\",\n \"fore_valid_op\",\n \"train_op\",\n \"valid_op\",\n \"test_op\",\n ]\n for key in file_names:\n with open(os.path.join(path_to_data, key + \".json\"), \"r\") as openfile:\n loaded_data[key] = json.load(openfile)\n fore_train_ip = [np.array(x) for x in loaded_data[\"fore_train_ip\"]]\n fore_valid_ip = [np.array(x) for x in loaded_data[\"fore_valid_ip\"]]\n train_ip = [np.array(x) for x in loaded_data[\"train_ip\"]]\n valid_ip = [np.array(x) for x in loaded_data[\"valid_ip\"]]\n test_ip = [np.array(x) for x in loaded_data[\"test_ip\"]]\n fore_train_op = np.array(loaded_data[\"fore_train_op\"])\n fore_valid_op = np.array(loaded_data[\"fore_valid_op\"])\n train_op = np.array(loaded_data[\"train_op\"])\n valid_op = np.array(loaded_data[\"valid_op\"])\n test_op = np.array(loaded_data[\"test_op\"])\n del loaded_data\n\n if not raw_times:\n # default is False, so times usually WILL be normalized\n # compute mean and variance of times in training set while ignoring padding\n missing_idx = fore_train_ip[3] == 0\n tmp_times = copy.deepcopy(fore_train_ip[1])\n tmp_times[missing_idx] = np.nan\n time_mean = np.nanmean(tmp_times)\n time_stddev = np.nanstd(tmp_times)\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n fore_train_ip = [\n fore_train_ip[0],\n tmp_times,\n fore_train_ip[2],\n fore_train_ip[3],\n ]\n\n # normalize val set times\n missing_idx = fore_valid_ip[3] == 0\n tmp_times = copy.deepcopy(fore_valid_ip[1])\n tmp_times[missing_idx] = np.nan\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n fore_valid_ip = [\n fore_valid_ip[0],\n tmp_times,\n fore_valid_ip[2],\n fore_valid_ip[3],\n ]\n\n # normalize labeled datasets\n for tmp_ip in [train_ip, valid_ip, test_ip]:\n missing_idx = tmp_ip[3] == 0\n tmp_times = copy.deepcopy(tmp_ip[1])\n tmp_times[missing_idx] = np.nan\n tmp_times = (tmp_times - time_mean) / time_stddev\n tmp_times[missing_idx] = 0\n tmp_ip[1] = tmp_times\n else:\n time_mean = time_stddev = None\n\n return (\n fore_train_ip,\n fore_train_op,\n fore_valid_ip,\n fore_valid_op,\n train_ip,\n train_op,\n valid_ip,\n valid_op,\n test_ip,\n test_op,\n time_mean,\n time_stddev,\n )", "def fullLoad(f_list):\n\n t_1 = datetime.now()\n\n id_list = []\n data_dict = {}\n\n for i, f in enumerate(f_list):\n \n i_t_1 = datetime.now()\n\n r_f = open(f, \"rb\")\n r_data = r_f.read().split(\"\\n\")\n\n ln_r_data = len(r_data)\n\n for j, r in enumerate(r_data[:-1]):\n j_r = json.loads(r)\n if j_r[\"_id\"] not in id_list:\n id_list.append(j_r[\"_id\"])\n date = datetime.strptime(j_r[\"pub_date\"][0:10], 
\"%Y-%m-%d\")\n if date not in data_dict:\n data_dict[date] = \"\"\n data_dict[date] += \" %s\" % j_r[\"lead_paragraph\"]\n print (j * 100.) / ln_r_data, datetime.now() - t_1, datetime.now() - i_t_1, i \n return data_dict", "def main(file_list):\n data_store = {}\n \n for file in file_list:\n sample_id = get_sample_id(file)\n data_store[sample_id] = {}\n data_store[sample_id][\"sample_type\"], data_store[sample_id][\"out_filename\"], data_store[sample_id][\"out_location\"] = check_name(file, sample_id)\n data_store[sample_id][\"df_parameters\"], data_store[sample_id][\"df_values\"], data_store[sample_id][\"df_parameters_for_values\"] = data_in(file, sample_id)\n if data_store[sample_id][\"sample_type\"] == \"EFC\":\n binder_mass = efc_calcs(data_store[sample_id][\"df_parameters\"])\n elif data_store[sample_id][\"sample_type\"] == \"OPC\":\n binder_mass = opc_calcs(data_store[sample_id][\"df_parameters\"])\n data_store[sample_id][\"df_values\"] = tidy_val_df(data_store[sample_id][\"df_values\"], binder_mass)\n data_store[sample_id][\"df_parameters\"] = tidy_param_df(sample_id, data_store[sample_id][\"df_parameters\"], data_store[sample_id][\"out_filename\"])\n for key, value in data_store.items():\n write_to_excel(key, value[\"df_parameters\"], value[\"df_values\"], value[\"df_parameters_for_values\"], value[\"out_location\"])", "def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))", "def load_data(self):\n logging.debug('Loading data from file ({})...'.format(self.file_name))\n parsed_data = list()\n with open(self.file_name) as file_data:\n for line in file_data.readlines():\n temp = dict()\n if 'JD' in line:\n continue\n line = line.split()\n temp['ts'], temp['mag'], temp['dif'] = float(line[0][:14]), float(line[1]), float(line[2])\n temp['f_mag'] = self.kalman_filter(temp['mag'])\n temp['dt'] = self.jd_to_datetime(temp['ts'])\n temp['dt_cor'] = self.jd_to_datetime(temp['ts'] - TIME_CRT)\n parsed_data.append(temp)\n logging.debug(' {} records loaded.'.format(len(parsed_data)))\n logging.debug(parsed_data[0])\n self.data_stream = parsed_data", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n\n all_uniques = [] # storing a list with all the unique date_times \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n \"\"\" Loop over all the datasets \n k: name of the dataset\n v: list of file paths, eg 'era5_1':[filepath_1, filepath_2 ]\"\"\"\n\n for k,v in self.datasets.items() :\n self.unique_dates[k] = {}\n for F in v: \n self.unique_dates[k][F] = {}\n \n self.unique_dates[k][F]['indices'] = {} \n self.unique_dates[k][F]['index_offset_next'] = 0 # to be replaced later when slicing \n self.unique_dates[k][F]['index_offset'] = 0 # to be replaced later when slicing \n\n unique_dt = list(data[k][F]['recordtimestamp'])\n \n indices = list(data[k][F]['recordindex'])\n all_uniques += unique_dt # adding to the total unique 
date_times \n\n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n\n if dt not in which_k_in_dt.keys():\n which_k_in_dt[dt] = {}\n if k not in which_k_in_dt[dt].keys():\n which_k_in_dt[dt][k] = [] \n if F not in which_k_in_dt[dt][k]:\n which_k_in_dt[dt][k].append(F)\n # at this point I have e.g. which_k_in_dt= {1990-01-01-12-00: {era5_1:[file1,file2] , ncar:[file3] } }\n\n self.unique_dates[k][F]['indices'][dt] = {}\n self.unique_dates[k][F]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n index_up = max(indices)+1000000 # dummy large number \n\n self.unique_dates[k][F]['indices'][dt]['up'] = index_up\n self.unique_dates[k][F]['up_to_dt_slice'] = data[k][F]['min_date'] \n \n\n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of *ALL* distinct dt values of all datasets and all files \n logging.debug('*** make_all_datetime finished ')", "def load_data(fi, file_count, path):\n et = []\n ep = []\n st = {}\n positions = {}\n\n for i in range(fi,fi+file_count):\n np_path = '%s%i.npz'%(path,i)\n data = np.load(np_path)\n \n idx = []\n used_clusters = []\n used_labels = []\n peakwidth = 20\n \n clusters = data['c']\n t = data['t']\n pos = data['p']\n \n if len(data['et']) > 0:\n et.extend(data['et'])\n try:\n ep.extend(data['ep'])\n except:\n pass\n \n for k in np.unique(clusters[clusters!=-1]):\n if k in st:\n st[k] = np.append(st[k],t[clusters==k])\n positions[k] = np.concatenate((positions[k],pos[clusters==k]))\n else:\n st[k] = t[clusters==k]\n positions[k] = pos[clusters==k]\n\n if len(ep)>0:\n ep = np.vstack(ep)\n return st, positions, np.array(et), ep", "def load_data(data_path, load_paths, month_year_start, month_year_end, \n day_start=None, day_end=None, verbose=False):\n \n # Load file containing GPS coordinates for blockfaces.\n with open(os.path.join(data_path, 'blockface_locs.p'), 'rb') as f:\n locations = pickle.load(f)\n \n # Load sheet containing blockface info about blockface operating times.\n block_info = pd.read_csv(os.path.join(data_path, 'block_info.csv'))\n keep_columns = ['ElementKey', 'PeakHourStart1', 'PeakHourEnd1', \n 'PeakHourStart2', 'PeakHourEnd2', 'PeakHourStart3', \n 'PeakHourEnd3', 'EffectiveStartDate', 'EffectiveEndDate']\n block_info = block_info[keep_columns]\n \n # Converting to datetime format for processing.\n for col in keep_columns:\n if 'Hour' in col:\n block_info.loc[:, col] = pd.to_datetime(block_info[col]).dt.time\n elif 'Date' in col:\n block_info.loc[:, col] = pd.to_datetime(block_info[col])\n else:\n pass\n \n # Loading holiday information for when paid parking is not available.\n cal = USFederalHolidayCalendar()\n holidays = cal.holidays(start='2012-01-01', end=datetime.datetime.now().date()).to_pydatetime()\n holidays = [hol.date() for hol in holidays]\n\n # Getting starting and ending date to keep data for.\n if day_start == None:\n day_start = 1\n if day_end == None:\n day_end = calendar.monthrange(month_year_end[1], month_year_end[0])[1]\n\n date_start = datetime.date(month_year_start[1], month_year_start[0], day_start)\n date_end = datetime.date(month_year_end[1], month_year_end[0], day_end)\n\n avg_loads = []\n gps_loc = []\n element_keys = []\n park_data = {}\n \n if isinstance(load_paths, list):\n pass\n else:\n load_paths = [load_paths]\n\n for load_path in load_paths:\n for fi in 
sorted(glob.glob(load_path + os.sep + '*.csv'), key=lambda fi: int(fi.split(os.sep)[-1].split('.')[0])):\n key = int(fi.split(os.sep)[-1].split('.')[0])\n\n block_data = pd.read_csv(fi, names=['Datetime', 'Load'])\n\n block_data['Datetime'] = pd.to_datetime(block_data['Datetime'])\n block_data.sort_values(by='Datetime', inplace=True)\n\n # Dropping days where the supply was 0 for this blockface.\n block_data.dropna(inplace=True)\n\n block_data['Date'] = block_data['Datetime'].dt.date\n block_data['Time'] = block_data['Datetime'].dt.time\n block_data['Day'] = block_data['Datetime'].dt.weekday\n block_data['Hour'] = block_data['Datetime'].dt.hour\n block_data['Minute'] = block_data['Datetime'].dt.minute\n\n # Keeping the data in the specified date range.\n block_data = block_data.loc[(block_data['Date'] >= date_start) & (block_data['Date'] <= date_end)]\n\n # Getting rid of Sunday since there is no paid parking.\n block_data = block_data.loc[block_data['Day'] != 6]\n\n # Dropping the days where the total parking is 0 because of holidays.\n block_data = block_data.loc[~block_data['Date'].isin(holidays)]\n block_data.reset_index(inplace=True, drop=True)\n\n # Clipping the loads to be no higher than 1.5\n block_data['Load'] = block_data['Load'].clip_upper(1.5)\n\n # If block contains no data, skip it.\n if len(block_data) == 0:\n if verbose:\n print('Skipping block %d because the supply is always 0.' % key)\n continue\n\n # If the block always has 0 occupancy, skip it.\n if len(block_data.loc[block_data['Load'] != 0]) == 0:\n if verbose:\n print('Skipping block %d because the occupancy is always 0.' % key)\n continue\n\n # Get GPS midpoint for block-face and skip if no information for it.\n if key in locations:\n curr_block = locations[key]\n\n lat1, lat2 = curr_block[1], curr_block[-2]\n lon1, lon2 = curr_block[0], curr_block[-3]\n\n mid_lat = (lat1 + lat2)/2.\n mid_long = (lon1 + lon2)/2.\n gps_loc.append([mid_lat, mid_long])\n else:\n if verbose:\n print('Skipping block %d because it was not found in locations.' 
% key)\n continue\n\n # Getting block-face info for the current key about hours of operation.\n curr_block_info = block_info.loc[block_info['ElementKey'] == key]\n\n # Filling times where paid parking is not allowed for the block with nan.\n for index, row in curr_block_info.iterrows():\n row_null = row.isnull()\n\n if not row_null['PeakHourStart1'] and not row_null['PeakHourStart2'] and not row_null['PeakHourStart3']:\n continue\n\n if not row_null['EffectiveEndDate']:\n row['EffectiveEndDate'] += datetime.timedelta(hours=23, minutes=59, seconds=59)\n\n if not row_null['PeakHourStart1']:\n\n start1 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourStart1']) for i in xrange(len(block_data))])\n end1 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourEnd1']) for i in xrange(len(block_data))])\n\n if row_null['EffectiveEndDate']:\n mask1 = ((row['EffectiveStartDate'] <= block_data['Datetime'])\n & (start1 <= block_data['Datetime']) \n & (end1 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n else:\n mask1 = ((row['EffectiveStartDate'] <= block_data['Datetime']) \n & (row['EffectiveEndDate'] >= block_data['Datetime'])\n & (start1 <= block_data['Datetime']) \n & (end1 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n\n block_data.loc[mask1, 'Load'] = np.nan \n\n if not row_null['PeakHourStart2']:\n\n start2 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourStart2']) for i in xrange(len(block_data))])\n end2 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourEnd2']) for i in xrange(len(block_data))])\n\n if row_null['EffectiveEndDate']:\n mask2 = ((row['EffectiveStartDate'] <= block_data['Datetime'])\n & (start2 <= block_data['Datetime']) \n & (end2 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n else:\n mask2 = ((row['EffectiveStartDate'] <= block_data['Datetime']) \n & (row['EffectiveEndDate'] >= block_data['Datetime'])\n & (start2 <= block_data['Datetime']) \n & (end2 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n\n block_data.loc[mask2, 'Load'] = np.nan \n\n if not row_null['PeakHourStart3']:\n\n start3 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourStart3'])\n for i in xrange(len(block_data))])\n end3 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourEnd3'])\n for i in xrange(len(block_data))])\n\n if row_null['EffectiveEndDate']:\n mask3 = ((row['EffectiveStartDate'] <= block_data['Datetime'])\n & (start3 <= block_data['Datetime']) \n & (end3 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n else:\n mask3 = ((row['EffectiveStartDate'] <= block_data['Datetime']) \n & (row['EffectiveEndDate'] >= block_data['Datetime'])\n & (start3 <= block_data['Datetime']) \n & (end3 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n\n block_data.loc[mask3, 'Load'] = np.nan \n\n # Getting the average load for each hour of the week for the block.\n avg_load = block_data.groupby(['Day', 'Hour'])['Load'].mean().values.reshape((1,-1))\n\n # If there is not data skip it.\n if avg_load.shape != (1, 72):\n gps_loc.pop()\n continue\n\n avg_loads.append(avg_load)\n element_keys.append(key)\n park_data[key] = block_data\n \n # Each row has load and GPS locations for a block. 
Ordered as in element_keys.\n avg_loads = np.vstack((avg_loads))\n gps_loc = np.vstack((gps_loc))\n\n index = park_data[park_data.keys()[0]].groupby(['Day', 'Hour']).sum().index\n\n days = index.get_level_values(0).unique().values\n days = np.sort(days)\n\n hours = index.get_level_values(1).unique().values\n hours = np.sort(hours)\n\n idx_to_day_hour = {i*len(hours) + j:(days[i], hours[j]) for i in range(len(days)) \n for j in range(len(hours))}\n day_hour_to_idx = {v:k for k,v in idx_to_day_hour.items()}\n \n for key in park_data:\n park_data[key] = park_data[key].set_index('Datetime')\n\n # Merging the dataframes into multi-index dataframe.\n park_data = pd.concat(park_data.values(), keys=park_data.keys())\n\n park_data.index.names = ['ID', 'Datetime']\n\n # Making the first index the date, and the second the element key, sorted by date.\n park_data = park_data.swaplevel(0, 1).sort_index()\n\n return element_keys, avg_loads, gps_loc, park_data, idx_to_day_hour, day_hour_to_idx", "def load_raw_data(dir, matlab=False):\n\n\tcurrent_dir = os.getcwd() \n\t\n\tos.chdir(dir)\n\t\n\tfile_names = []\n\tdata = {}\n\t\n\t\n\t## For text files\n\tif not matlab:\n\t\tfiles = glob.glob('*.txt')\n\t\t\n\t\tassert len(files) > 0, 'No *.txt files found!'\n\n\t\tif len(glob.glob('*.mat')) > 0:\n\t\t\tprint('WARNING: matlab files also found in directory: \\t%s'%dir)\n\t\t\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\tdata['markers'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('spike') > -1:\n\t\t\t\tdata['spikes'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\t\t\n\t\t\telif f_name.find('shape') > -1:\n\t\t\t\tdata['shape'] = np.loadtxt(f_name, skiprows=1)\n\t\t\t\tfile_names.append(f)\n\t\n\n\t## For matlab files\n\t# These matlab files have more useful data than is extracted here.\n\telif matlab:\n\t\tfiles = glob.glob('*.mat')\n\t\t\n\t\tassert len(files) > 0, 'No matlab files found!'\n\t\t\n\t\tif len(glob.glob('*.txt')) > 0:\n\t\t\tprint('WARNING: text files also found in directory: \\t%s' %dir)\n\n\t\tfor f in files:\n\t\t\tf_name = f.lower()\n\t\t\t\n\t\t\t\n\t\t\tif f_name.find('mark') > -1:\n\t\t\t\t\n\t\t\t\tmark_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tmark_key = mark_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the markers\n\t\t\t\tdata['markers'] = np.array(mark_file['%s/times' %mark_key])\n\t\t\t\tdata['markers'] = np.reshape(data['markers'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\t\n\t\t\t\t# Extract the numerical codes of the markers, which are listed one-to-one\n\t\t\t\t# with the times extracted above. Useful for an integrity check.\n\t\t\t\t# Zero index necessary as marker codes has three empty columns\n\t\t\t\tdata['marker_codes'] = np.array(mark_file['%s/codes' %mark_key][0])\n\t\t\t\tdata['marker_codes'] = np.reshape(data['marker_codes'], -1) # turn to 1D array, as first axis redundant\n\t\t\t\tfile_names.append(f)\n\n\t\t\telif f_name.find('spike') > -1:\n\n\t\t\t\tspike_file = h5py.File(f) # Loads hfd5 file\n\t\t\t\tspike_key = spike_file.keys()[0] # Gets name of relevant file for extract\n\t\t\t\t\n\t\t\t\t# Extract times of the spikes\n\t\t\t\tdata['spikes'] = np.array(spike_file['%s/times' %spike_key])\n\t\t\t\tdata['spikes'] = np.reshape(data['spikes'], -1) # turn to 1D array, as first axis redundant\n\n\n\t\t\t\t#Extract trace for each spike. 
First Dim-trace, second-spikes.\n\t\t\t\tspike_traces = np.array(spike_file['%s/values' %spike_key])\n\t\t\t\t\n\t\t\t\t# Calculate Average shape (for all templates, which are coded in '/codes')\n\t\t\t\tavg_spike_trace = np.mean(spike_traces, axis=1)\n\t\t\t\tsem_avg_spike_trace = stats.sem(spike_traces, axis=1, ddof=1)\n\t\t\t\t\n\t\t\t\tdata['shape'] = avg_spike_trace\n\t\t\t\tdata['shape_SEM'] = sem_avg_spike_trace\n\t\t\t\tfile_names.append(f) \n\t\t\t\t\n\t\t\t\t\t\t\n\tos.chdir(current_dir)\n\n\t\t\t\n\tif len(data.keys()) != len(files):\n\t\tmesg = 'Not all of your file names are recognised; they may not have been imported appropriately.'\n\t\tmesg2 = 'File names must contain the key words \"mark\", \"spike\" and/or \"shape.\"'\n\t\tprint(mesg)\n\t\tprint(mesg2)\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data\n\n\t\n\telif len(data.keys()) == len(files):\n\t\tprint('All files imported and assigned')\n\t\tprint('\\nFollowing files loaded successfully:\\n')\n\t\tfor i in file_names: print(i)\n\t\treturn data", "def loaddata(self, directory=None, tag=\"AUC\"):\n self.data = {}\n for c in self.cellLines:\n self.data[c] = {}\n for l in self.ligands:\n self.data[c][l] = []\n\n for l in self.ligands:\n for c in self.cellLines:\n if self.verbose:print(\"Combining all data related to %s/%s\" % (c,l))\n if directory:\n filenames = glob.glob(directory +os.sep+\"%s_%s_%s*\" % (tag, c,l))\n else:\n filenames = glob.glob(\"%s_%s_%s*\" % (tag, c,l))\n\n if self.verbose: print(\" found %s files\\n reading \" % len(filenames))\n\n for filename in filenames:\n fh = open(filename, \"r\")\n data = fh.read().strip().split()\n data = [float(x) for x in data]\n\n try:\n self.data[c][l].extend(data)\n except:\n self.data[c][l] = data\n self._compute_params()", "def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data", "def load(self):\n self.data = pd.read_pickle(self.DATE_PKL)\n self.data.index.name = DATE_COL\n\n for hname, h in self.handlers.items():\n print(\"Loading %s\" % hname)\n cur_out = '../'+h.out_path\n df = pd.read_pickle(cur_out).resample('D').ffill() # make daily and forward fill the values\n if hname in self.data.columns:\n # getting to a distinct column:\n i = 2\n while \"%s_%s\" % (hname, i) in self.data.columns:\n i += 1\n print(\"warning: %s was already in the data set, instead we merged new column as %s\" %\n (hname, hname + '_%s' % i))\n self.data = self.data.join(df, how='left', rsuffix=\"_%s\" % i)\n else:\n self.data = self.data.join(df, how='left')", "def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)", "def 
loadPoints(self, pointsDir):\n self.pointsDir = pointsDir\n\n for ss in range(len(self.stars)):\n star = self.stars[ss]\n\n # Number of Epochs Detected should be corrected\n # for epochs trimmed out of the *.points files.\n pntsFile = '%s%s.points' % (pointsDir, star.name)\n _pnts = Table.read(pntsFile)\n\n photFile = '%s%s.phot' % (pointsDir, star.name)\n _phot = Table.read(photFile)\n\n star.pointsCnt = _pnts.nrows\n if star.pointsCnt == 0:\n pntDate = np.array([])\n pntX = np.array([])\n pntY = np.array([])\n pntXe = np.array([])\n pntYe = np.array([])\n photDate = np.array([])\n photR = np.array([])\n photX = np.array([])\n photY = np.array([])\n photXe = np.array([])\n photYe = np.array([])\n photM = np.array([])\n photMe = np.array([])\n\n else:\n pntDate = _pnts[0].tonumpy()\n pntX = _pnts[1].tonumpy()\n pntY = _pnts[2].tonumpy()\n pntXe = _pnts[3].tonumpy()\n pntYe = _pnts[4].tonumpy()\n\n photDate = _phot[0].tonumpy()\n photR = _phot[1].tonumpy()\n photX = _phot[2].tonumpy()\n photY = _phot[3].tonumpy()\n photXe = _phot[4].tonumpy()\n photYe = _phot[5].tonumpy()\n photM = _phot[6].tonumpy()\n photMe = _phot[7].tonumpy()\n\n\n # Load up data from the points files.\n for ee in range(len(star.years)):\n ttPnts = (np.where(abs(pntDate - star.years[ee]) < 0.001))[0]\n ttPhot = (np.where(abs(photDate - star.years[ee]) < 0.001))[0]\n\n\n if (len(ttPnts) == 0):\n star.e[ee].pnt_x = -1000.0\n star.e[ee].pnt_y = -1000.0\n star.e[ee].pnt_xe = -1000.0\n star.e[ee].pnt_ye = -1000.0\n else:\n ttPnts = ttPnts[0]\n star.e[ee].pnt_x = pntX[ttPnts]\n star.e[ee].pnt_y = pntY[ttPnts]\n star.e[ee].pnt_xe = pntXe[ttPnts]\n star.e[ee].pnt_ye = pntYe[ttPnts]\n\n if (len(ttPhot) == 0):\n star.e[ee].phot_r = -1000.0\n star.e[ee].phot_x = -1000.0\n star.e[ee].phot_y = -1000.0\n star.e[ee].phot_xe = -1000.0\n star.e[ee].phot_ye = -1000.0\n star.e[ee].phot_mag = -1000.0\n star.e[ee].phot_mage = -1000.0\n else:\n ttPhot = ttPhot[0]\n star.e[ee].phot_r = photR[ttPhot]\n star.e[ee].phot_x = photX[ttPhot]\n star.e[ee].phot_y = photY[ttPhot]\n star.e[ee].phot_xe = photXe[ttPhot]\n star.e[ee].phot_ye = photYe[ttPhot]\n star.e[ee].phot_mag = photM[ttPhot]\n star.e[ee].phot_mage = photMe[ttPhot]", "def total_fire_power_time_series(files, bounding_box):\n \n assert isinstance(bounding_box, BoundingBox)\n bb = bounding_box\n \n results = {}\n \n vals = map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))\n vals = (val for val in vals if val is not None)\n \n for time, val, fname in vals:\n results[time] = (val, fname)\n \n return results", "def parse_and_map(self, local_inet_path):\n for file_name in tqdm(self.filenames):\n # TODO: Add some log while processing data\n # Reads file name from full file path\n sliced_list = file_name.split(sep='/t')[-1].split(sep='_')\n self.data_dict['path'].append(file_name)\n self.data_dict['dataset'].append(sliced_list[1])\n self.data_dict['device'].append(sliced_list[2])\n self.data_dict['wn_id'].append(sliced_list[3])\n self.data_dict['im_id'].append(sliced_list[4])\n self.data_dict['eeg_session'].append(sliced_list[5])\n self.data_dict['global_session'].append(sliced_list[6].split(sep='.')[0])\n # File name: /MindBigData_Imagenet_Insight_n00007846_6247_1_785\n # Imagenet file path: /n00007846/n00007846_6247.JPEG\n file_name = str(sliced_list[3] + '_' + sliced_list[4] + '.JPEG')\n inet_path = os.path.join(local_inet_path, sliced_list[3], file_name)\n # If copy is true, data related local ImageNet images will be copied to separate folder\n if self.copy:\n 
try:\n # New file paths\n new_dir_path = os.path.join(self.copy_path, sliced_list[3])\n new_inet_path = os.path.join(new_dir_path, file_name)\n # Creates recursive folders in disk\n os.makedirs(new_dir_path, exist_ok=True, mode=0o771)\n # Copies file to destination\n shutil.copy(inet_path, new_inet_path)\n # Appends new file path to list\n self.data_dict['inet_path'].append(new_inet_path)\n except Exception as e:\n # TODO: More useful exception\n print(e)\n else:\n # Append local ImageNet path to list\n self.data_dict['inet_path'].append(inet_path)", "def read_data(args):\n\n print(\"Start read_data\")\n t_tot = 0 # sum of times for the all dataset\n date_dirs = os.listdir(args.path_data_base)\n for n_iter, date_dir in enumerate(date_dirs):\n # get access to each sequence\n path1 = os.path.join(args.path_data_base, date_dir)\n if not os.path.isdir(path1):\n continue\n date_dirs2 = os.listdir(path1)\n\n for date_dir2 in date_dirs2:\n path2 = os.path.join(path1, date_dir2)\n if not os.path.isdir(path2):\n continue\n # read data\n oxts_files = sorted(glob.glob(os.path.join(path2, 'oxts', 'data', '*.txt')))\n oxts = KITTIDataset.load_oxts_packets_and_poses(oxts_files)\n\n \"\"\" Note on difference between ground truth and oxts solution:\n - orientation is the same\n - north and east axis are inverted\n - position are closed to but different\n => oxts solution is not loaded\n \"\"\"\n\n print(\"\\n Sequence name : \" + date_dir2)\n if len(oxts) < KITTIDataset.min_seq_dim: #  sequence shorter than 30 s are rejected\n cprint(\"Dataset is too short ({:.2f} s)\".format(len(oxts) / 100), 'yellow')\n continue\n lat_oxts = np.zeros(len(oxts))\n lon_oxts = np.zeros(len(oxts))\n alt_oxts = np.zeros(len(oxts))\n roll_oxts = np.zeros(len(oxts))\n pitch_oxts = np.zeros(len(oxts))\n yaw_oxts = np.zeros(len(oxts))\n roll_gt = np.zeros(len(oxts))\n pitch_gt = np.zeros(len(oxts))\n yaw_gt = np.zeros(len(oxts))\n t = KITTIDataset.load_timestamps(path2)\n acc = np.zeros((len(oxts), 3))\n acc_bis = np.zeros((len(oxts), 3))\n gyro = np.zeros((len(oxts), 3))\n gyro_bis = np.zeros((len(oxts), 3))\n p_gt = np.zeros((len(oxts), 3))\n v_gt = np.zeros((len(oxts), 3))\n v_rob_gt = np.zeros((len(oxts), 3))\n\n k_max = len(oxts)\n for k in range(k_max):\n oxts_k = oxts[k]\n t[k] = 3600 * t[k].hour + 60 * t[k].minute + t[k].second + t[\n k].microsecond / 1e6\n lat_oxts[k] = oxts_k[0].lat\n lon_oxts[k] = oxts_k[0].lon\n alt_oxts[k] = oxts_k[0].alt\n acc[k, 0] = oxts_k[0].af\n acc[k, 1] = oxts_k[0].al\n acc[k, 2] = oxts_k[0].au\n acc_bis[k, 0] = oxts_k[0].ax\n acc_bis[k, 1] = oxts_k[0].ay\n acc_bis[k, 2] = oxts_k[0].az\n gyro[k, 0] = oxts_k[0].wf\n gyro[k, 1] = oxts_k[0].wl\n gyro[k, 2] = oxts_k[0].wu\n gyro_bis[k, 0] = oxts_k[0].wx\n gyro_bis[k, 1] = oxts_k[0].wy\n gyro_bis[k, 2] = oxts_k[0].wz\n roll_oxts[k] = oxts_k[0].roll\n pitch_oxts[k] = oxts_k[0].pitch\n yaw_oxts[k] = oxts_k[0].yaw\n v_gt[k, 0] = oxts_k[0].ve\n v_gt[k, 1] = oxts_k[0].vn\n v_gt[k, 2] = oxts_k[0].vu\n v_rob_gt[k, 0] = oxts_k[0].vf\n v_rob_gt[k, 1] = oxts_k[0].vl\n v_rob_gt[k, 2] = oxts_k[0].vu\n p_gt[k] = oxts_k[1][:3, 3]\n Rot_gt_k = oxts_k[1][:3, :3]\n roll_gt[k], pitch_gt[k], yaw_gt[k] = IEKF.to_rpy(Rot_gt_k)\n\n t0 = t[0]\n t = np.array(t) - t[0]\n # some data can have gps out\n if np.max(t[:-1] - t[1:]) > 0.1:\n cprint(date_dir2 + \" has time problem\", 'yellow')\n ang_gt = np.zeros((roll_gt.shape[0], 3))\n ang_gt[:, 0] = roll_gt\n ang_gt[:, 1] = pitch_gt\n ang_gt[:, 2] = yaw_gt\n\n p_oxts = lla2ned(lat_oxts, lon_oxts, alt_oxts, lat_oxts[0], 
lon_oxts[0],\n alt_oxts[0], latlon_unit='deg', alt_unit='m', model='wgs84')\n p_oxts[:, [0, 1]] = p_oxts[:, [1, 0]] # see note\n\n # take correct imu measurements\n u = np.concatenate((gyro_bis, acc_bis), -1)\n # convert from numpy\n t = torch.from_numpy(t)\n p_gt = torch.from_numpy(p_gt)\n v_gt = torch.from_numpy(v_gt)\n ang_gt = torch.from_numpy(ang_gt)\n u = torch.from_numpy(u)\n\n # convert to float\n t = t.float()\n u = u.float()\n p_gt = p_gt.float()\n ang_gt = ang_gt.float()\n v_gt = v_gt.float()\n\n mondict = {\n 't': t, 'p_gt': p_gt, 'ang_gt': ang_gt, 'v_gt': v_gt,\n 'u': u, 'name': date_dir2, 't0': t0\n }\n\n t_tot += t[-1] - t[0]\n KITTIDataset.dump(mondict, args.path_data_save, date_dir2)\n print(\"\\n Total dataset duration : {:.2f} s\".format(t_tot))", "def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data", "def load_data():\n\n dump_path = dump_base + '/micro_poi/mpoi_info/'\n\n assert os.path.exists(dump_path)\n\n dpath = dump_path + 'shortest_path.pickle'\n paths = joblib.load(dpath)\n\n dpath = dump_path + 'path_list.pickle'\n path_list = joblib.load(dpath)\n\n dpath = dump_path + 'gain.pickle'\n gain = joblib.load(dpath)\n\n dpath = dump_path + 'stay.pickle'\n stay_time = joblib.load(dpath)\n\n dpath = dump_path + 'reach.pickle'\n reach_time = joblib.load(dpath)\n\n spath = dump_base + '/micro_poi/model_params.list'\n model_params = np.loadtxt(spath)\n\n return np.array(paths), path_list, gain, stay_time, reach_time, model_params", "def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)", "def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. 
\"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating 
the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0", "def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)", "def _get_data(self) -> dict:\n LOGGER.debug(f\"Setting data property for {self.dirname}\")\n data = {}\n for axis in range(1, 4):\n # Subsample by 8 since this does not vary quickly\n data[f\"aoatter{axis}\"] = (\n self.tlm[f\"aoatter{axis}\"].vals[::ATT_ERR_SUBSAMP].astype(np.float32)\n )\n data[\"aokalstr\"] = self.tlm[\"aokalstr\"].vals\n # fmt: off\n data[\"npnt_kalm\"] = (\n (self.tlm[\"aopcadmd\"].vals == \"NPNT\")\n & (self.tlm[\"aoacaseq\"].vals == \"KALM\")\n )\n # fmt: on\n for slot in range(8):\n data[f\"aca_track{slot}\"] = self.tlm[f\"aoacfct{slot}\"].vals == \"TRAK\"\n data[f\"aca_ir{slot}\"] = self.tlm[f\"aoaciir{slot}\"].vals == \"ERR\"\n data[\"times\"] = self.tlm[\"aokalstr\"].times\n data[\"perigee_times\"] = self.tlm.perigee_times.astype(np.float32)\n data[\"perigee\"] = self.perigee.date\n data[\"rad_entry\"] = self.rad_entry.date\n data[\"rad_exit\"] = self.rad_exit.date\n data[\"obss\"] = self.obss.as_array()\n\n return data", "def load(self):\n\n super(DatasetLoader_XRite2016, self).sync()\n\n keys = (\n 'ColorChecker24 - After November 2014',\n 'ColorChecker24 - Before November 2014',\n 'ColorCheckerSG - After November 2014',\n 'ColorCheckerSG - Before November 2014',\n )\n filenames = (\n 'ColorChecker24_After_Nov2014.txt',\n 'ColorChecker24_Before_Nov2014.txt',\n 'ColorCheckerSG_After_Nov2014.txt',\n 'ColorCheckerSG_Before_Nov2014.txt',\n )\n\n # TODO: Implement support for \"CGATS\" file format in \"Colour\":\n # https://github.com/colour-science/colour/issues/354\n illuminant = (\n CCS_ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['ICC D50'])\n\n self._content = OrderedDict()\n for key, filename in zip(keys, filenames):\n directory = os.path.splitext(filename)[0]\n path = os.path.join(self.record.repository, 'dataset', directory,\n filename)\n\n with codecs.open(path, encoding='utf-8') as xrite_file:\n samples = []\n is_data = False\n lines = filter(\n None, (line.strip() for line in xrite_file.readlines()))\n for line in lines:\n if line == 'END_DATA':\n is_data = False\n\n if is_data:\n tokens = line.split()\n samples.append([\n tokens[0],\n [\n float(value.replace(',', '.'))\n for value in tokens[1:]\n ],\n ])\n\n if line == 'BEGIN_DATA':\n is_data = True\n\n i, j = (6, 4) if len(samples) == 24 else (14, 10)\n samples = np.array(samples)\n samples = np.transpose(samples.reshape([i, j, 2]), [1, 0, 2])\n keys, values = zip(*samples.reshape([-1, 2]))\n values = XYZ_to_xyY(Lab_to_XYZ(values, illuminant))\n self._content[key] = ColourChecker(key,\n OrderedDict(zip(keys, values)),\n illuminant)\n\n return self._content", "def get_files_from_time(self, time_info):\n file_dict = super().get_files_from_time(time_info)\n\n input_files = self.find_input_files(time_info, fill_missing=True)\n if input_files is None:\n return 
file_dict\n\n for key, value in input_files.items():\n file_dict[key] = value\n\n return file_dict", "def _get_datas(self):\n print(f'base name {self.base_name}')\n data_file_name = glob(osp.join(self.root_dir, MERGED_PATTERN))[0]\n data_df = pd.read_csv(data_file_name)\n\n ppg_d = data_df[['CurrentTimeMillis', 'ch1']].values\n acc_d = data_df[[\n 'EventTimestamp(ns)', 'accel_x', 'accel_y', 'accel_z'\n ]].values\n ppg_d = ppg_d[::2]\n acc_d = acc_d[::2]\n\n return acc_d, ppg_d" ]
[ "0.63508236", "0.6325412", "0.6257426", "0.620853", "0.6198646", "0.61051434", "0.6063956", "0.5999535", "0.5979587", "0.59757197", "0.59742457", "0.59592533", "0.5958371", "0.5948164", "0.59474903", "0.5946523", "0.5943279", "0.5918948", "0.5890966", "0.5865687", "0.585471", "0.5853741", "0.585025", "0.5843356", "0.5841621", "0.58401036", "0.5839462", "0.58370125", "0.58312094", "0.5817668" ]
0.73970014
0
This method gives us the possibility to export the data directly to the SQLite3 database
def FrameBase_to_Sqlite(self):
    sql3 = Sql3(self.dataFRAME)  # we hand the dataframe to the Sql3 class we created, which adds the data
    sql3.sql_write()             # very simple and easy
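A minimal sketch of what a helper like Sql3 could look like is given below, assuming it wraps sqlite3 and pandas.DataFrame.to_sql; the real Sql3 class is defined elsewhere in the source project, so the database file name, table name and method signature here are illustrative assumptions only.

import sqlite3
import pandas as pd

class Sql3:
    # Hypothetical stand-in for the author's Sql3 helper: it receives a
    # DataFrame and writes it into a SQLite3 file in one call.
    def __init__(self, dataframe, db_path="experiments.db"):
        self.dataframe = dataframe   # the DataFrame to export
        self.db_path = db_path       # assumed SQLite file name

    def sql_write(self, table_name="measurements"):
        conn = sqlite3.connect(self.db_path)
        try:
            # pandas creates the table and performs the INSERTs for us
            self.dataframe.to_sql(table_name, conn, if_exists="replace", index=False)
        finally:
            conn.close()

Usage mirrors the method above: Sql3(df).sql_write() writes the whole frame in one call.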
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_database(self):\n base_path = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File', filter='CSV (*.csv)')\n database.export_to_csv(DB_PATH, base_path[0])", "def to_sqlite(self, filename):\n\n export_to_db(self.headers, self.data, filename)", "def exportDB(self):\n sourcesession=svc.connect(self.__source,accessMode=coral.access_Update)\n destsession=svc.connect(self.__dest,accessMode = coral.access_Update)\n try:\n dbcp=DBCopy(sourcesession,destsession,1024)\n if self.__all:\n dbcp.copyDB()\n elif self.__inv:\n dbcp.copyInventory()\n elif len(self.__tree) != 0:\n dbcp.copyTrees([self.__tree])\n del sourcesession\n del destsession\n except Exception, e:\n print str(e)\n del sourcesession\n del destsession", "def save_data(df, database_filename):\n engine = create_engine('sqlite:///' +database_filename)\n df.to_sql('Project2', engine, index=False)", "def dbtocsv():\n connection = sqlite3.connect(\"sensordata.db\")\n cursor = connection.cursor()\n cursor.execute(\"Select * from sensordata\")\n roadstationdata = cursor.fetchall()\n\n with open('roadstationdata.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id','name','value','unit','time'])\n writer.writerows(roadstationdata)", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def export(self):\n f = open(self.database, 'w')\n for line in self.conn.iterdump():\n f.write(line)\n self.c.close()", "def to_sqlite():\n\tcreate_table()\n\n\tfor file in os.listdir(JSON_PATH):\n\t\tif file.endswith('.json'):\n\t\t\tword = get_word(file)\n\t\t\tprint('inserting ' + word + ' data into db')\n\n\t\t\tpath = os.path.join(JSON_PATH, file)\n\t\t\tjson = readjson(path)\n\n\t\t\tinsert(replace_url(json))\n\n\t# improve SELECT .. WHERE .. performance\n\tCURSOR.execute('CREATE INDEX [idx{}] ON {} ([ID])'.format(TABLE_NAME, TABLE_NAME))\n\n\tCONNECTION.commit()", "def import_data_to_database(self, database_type, data):\n\n if database_type == \"render\":\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n pointer.executemany(\"\"\"\n INSERT INTO render_information\n VALUES (?,?,?,?,?,?,?,?) \n \"\"\",\n (data)\n )\n connection.commit()\n connection.close()\n print(\"addet render information to database\")\n if database_type == \"object\":\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n pointer.executemany(\"\"\"\n INSERT INTO object_information\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?) \n \"\"\",\n (data)\n )\n connection.commit()\n connection.close()\n print(\"addet objectinformation information to database\")\n if database_type == \"output\":\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n pointer.executemany(\"\"\"\n INSERT INTO output_information\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,\n ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
\n \"\"\",\n (data)\n )\n connection.commit()\n connection.close()\n print(\"addet outputinformation information to database\")\n \n\n return", "def export_db(self, export_location: Path) -> None:\n raise NotImplementedError", "def export_dataset(self):\n raise NotImplementedError", "def object_export_in_dataBase(object_array, pathFile):\n conn = sqlite3.connect(pathFile)\n\n for obj in object_array:\n try:\n obj.export_to_data_base(conn)\n except AttributeError:\n print(\"object need to implement the function: \\'export_to_data_base\\'\")\n\n conn.commit()\n conn.close()", "def _convertDataToSQLite(self, data: TDXData\n ) -> t.Sequence[t.Mapping[t.Text, schemaconverter.SQLVal]]:\n # self.* lookup is slow so do it once only\n general_schema = self.general_schema\n\n # convert all the data to SQLite types\n convert_row = schemaconverter.convertRowToSqlite\n sql_data = [\n convert_row(general_schema, r, data_dir=self.data_dir)\n for r in data]\n return sql_data", "def save_data(df, database_filename): \n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('messages', engine, index=False, if_exists='replace')", "def save_db(self) -> None:", "def exportTable(self):\n try:\n self.createTable() #Create a table\n self.insertData() #Insert the daily settings\n print('Database has been exported to ' + self.destination + '\\\\'+ self.database + '\\n') #Export the table\n except:\n print('Enigma table already exists for this database. Please choose another database.') #Otherwise inform the user that the table exists\n self.reset() #Prompt a new input for the database name\n self.exportTable() #Try and export the new database using recursion", "def __upload_data(self):\n data_path = \"database\"\n os.makedirs(data_path, exist_ok=True)\n try:\n conn = sqlite3.connect('database/customers.db')\n query = '''CREATE TABLE IF NOT EXISTS all_customers_database (\n first_name TEXT, second_name TEXT,\n gender TEXT, account_type TEXT, account_number INTEGER PRIMARY KEY UNIQUE NOT NULL,\n account_password VARCHAR, account_balance REAL );'''\n #Create table\n cursor = conn.cursor()\n print(\"Connection sucessful\")\n cursor.execute(query)\n conn.commit()\n print(\"Table created\")\n #Insert a row to a database\n insert_query ='''INSERT INTO all_customers_database\n (first_name, second_name, gender, account_type, account_number, account_password, account_balance)\n VALUES \n (?, ?, ?, ?, ?, ?, ?);'''\n conn.execute(insert_query, (self.first_name, self.second_name, self.gender, self.account_type, self.account_number, self.account_password, self.account_balance))\n print(\"Your details saved successfully.\")\n except sqlite3.Error as err:\n # print(\"Error while creating a sqlite table \", err)\n print(\"Error creating database\")\n finally:\n if conn:\n conn.close()\n # print(\"Sqlite connection closed.\")", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' 
:[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += '?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk '/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def save_data(df: pd.DataFrame, database_filename: str) -> None:\n engine = create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(Path(database_filename).stem, engine, index=False, if_exists=\"replace\")", "def save_data(df, database_filename):\n engine = 
create_engine(f\"sqlite:///{database_filename}\")\n df.to_sql(\"YourTableName\", engine, index=False, if_exists=\"replace\")", "def export_sql(meta, data, output):\n\n tables = [table for table in meta.sorted_tables if table.name in data]\n preparer = IdentifierPreparer(meta.bind.dialect)\n prepare_column = lambda column: preparer.format_column(column, name=column.name)\n output_file = open(output, 'w')\n\n for table in tables:\n columns = ', '.join([ prepare_column(column) for column in table.columns.values() ])\n for row in data[table.name].values():\n values = list(map(_transform, list(row.values())))\n insert = \"INSERT INTO %s (%s) VALUES (%s);\\n\" % (\n preparer.format_table(table, name=table.name),\n columns,\n ', '.join(values)\n )\n output_file.write(insert)\n\n output_file.close()", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def save_to_output_database(self):\n connection = sqlite3.connect(self.filepath_output_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO objects (\n image_id,\n object_name,\n object_type,\n object_amount,\n object_location_x,\n object_location_y,\n object_location_z,\n object_rotation_x,\n object_rotation_y,\n object_rotation_z,\n object_dimensions_x,\n object_dimensions_y,\n object_dimensions_z\n )\n VALUES (\n :image_id,\n :object_name,\n :object_type,\n :object_amount,\n :object_location_x,\n :object_location_y,\n :object_location_z,\n :object_rotation_x,\n :object_rotation_y,\n :object_rotation_z,\n :object_dimensions_x,\n :object_dimensions_y,\n :object_dimensions_z\n )\n \"\"\"\n pointer.executemany(sql_anweisung, self.object_information)\n connection.commit()\n\n sql_anweisung = \"\"\"\n INSERT INTO camera_settings (\n image_id,\n image_variation,\n camera_name,\n camera_location_x,\n camera_location_y,\n camera_location_z,\n camera_rotation_x,\n camera_rotation_y,\n camera_rotation_z,\n camera_focal_length,\n camera_polar_angle,\n camera_azimuth_angle\n )\n VALUES (\n :image_id,\n :image_variation,\n :camera_name,\n :camera_location_x,\n :camera_location_y,\n :camera_location_z,\n :camera_rotation_x,\n :camera_rotation_y,\n :camera_rotation_z,\n :camera_focal_length,\n :camera_polar_angle,\n :camera_azimuth_angle\n )\n \"\"\"\n pointer.executemany(sql_anweisung, self.camera_information)\n connection.commit()\n\n sql_anweisung = \"\"\"\n INSERT INTO light_settings (\n image_id,\n image_variation,\n light_name,\n light_location_x,\n light_location_y,\n light_location_z,\n light_rotation_x,\n light_rotation_y,\n light_rotation_z,\n light_intensity,\n light_polar_angle,\n light_azimuth_angle\n )\n VALUES (\n :image_id,\n :image_variation,\n :light_name,\n :light_location_x,\n :light_location_y,\n :light_location_z,\n :light_rotation_x,\n :light_rotation_y,\n 
:light_rotation_z,\n :light_intensity,\n :light_polar_angle,\n :light_azimuth_angle \n )\n \"\"\"\n pointer.executemany(sql_anweisung, self.light_information)\n connection.commit()\n\n sql_anweisung = \"\"\"\n INSERT INTO general_settings (\n image_id,\n render_type,\n render_frame\n )\n VALUES (\n :image_id,\n :render_type,\n :render_frame\n )\n \"\"\"\n pointer.executemany(sql_anweisung, self.general_information)\n connection.commit()\n sql_anweisung = \"\"\"\n INSERT INTO bounding_boxes(\n image_id ,\n image_variation ,\n object_name,\n object_type,\n min_x,\n max_x,\n min_y,\n max_y\n )\n VALUES(\n :image_id ,\n :image_variation ,\n :object_name,\n :object_type,\n :min_x,\n :max_x,\n :min_y,\n :max_y\n )\n \"\"\"\n pointer.executemany(sql_anweisung, self.bounding_box_information)\n connection.commit()\n print(\"outputdatabase saved\")\n connection.close()\n print(\"Saved to output Database\")\n pass", "def save_data(df, database_filename):\n engine = create_engine('sqlite:///'+database_filename)\n df.to_sql('disasterdata', engine, index=False)", "def test_sql_to_csv():\n csv_outfile = 'optwrf_database.csv'\n db_conn = conn_to_db('optwrf.db')\n sql_to_csv(csv_outfile, db_conn)\n close_conn_to_db(db_conn)\n assert os.path.exists(csv_outfile) == 1", "def create_database(self):\n\n try: \n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n print(self.filepath_render_database)\n\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS render_information(\n \n object_type VARCHAR(255),\n name VARCHAR(255),\n radius REAL,\n polar_angle_min REAL,\n polar_anglel_max REAL,\n polar_angle_segments REAL,\n polar_angle_random_rad REAL,\n azimuth_angle_min REAL,\n azimuth_angle_max REAL,\n azimuth_angle_segments REAL,\n azimuth_angle_random_rad REAL,\n tracking_obj VARCHAR(255),\n segmentation VARCHAR(255)\n\n\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n connection.close()\n print(\"Creating render database file\")\n except:\n print(\"Was not able to create render database file\")\n \n try: \n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS object_information(\n obj_filepath VARCHAR(255),\n obj_name VARCHAR(255),\n obj_type VARCHAR(255),\n obj_scale_factor REAL,\n obj_location_x REAL,\n obj_location_y REAL,\n obj_location_z REAL,\n obj_rotation_x REAL,\n obj_rotation_y REAL,\n obj_rotation_z REAL,\n obj_amount_percent REAL,\n obj_material_path VARCHAR(255),\n obj_point_in_time VARCHAR(255),\n maximum_random_rotation_degree_z REAL,\n maximum_random_translation REAL,\n random_amount REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n connection.close()\n print(\"Creating object database file\")\n except:\n print(\"Was not able to create object database file\")\n\n try: \n connection = sqlite3.connect(self.filepath_output_database)\n print(\"outputfilepath is:\", self.filepath_output_database)\n pointer = connection.cursor()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS objects(\n image_id REAL,\n object_name VARCHAR(255),\n object_type VARCHAR(255),\n object_amount REAL,\n object_location_x REAL,\n object_location_y REAL,\n object_location_z REAL,\n object_rotation_x REAL,\n object_rotation_y REAL,\n object_rotation_z REAL,\n object_dimensions_x REAL,\n object_dimensions_y REAL,\n object_dimensions_z REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n 
CREATE TABLE IF NOT EXISTS camera_settings(\n image_id REAL,\n image_variation REAL,\n camera_name VARCHAR(255),\n camera_location_x REAL,\n camera_location_y REAL,\n camera_location_z REAL,\n camera_rotation_x REAL,\n camera_rotation_y REAL,\n camera_rotation_z REAL,\n camera_focal_length REAL,\n camera_polar_angle REAL,\n camera_azimuth_angle REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS light_settings(\n image_id REAL,\n image_variation REAL,\n light_name VARCHAR(255),\n light_location_x REAL,\n light_location_y REAL,\n light_location_z REAL,\n light_rotation_x REAL,\n light_rotation_y REAL,\n light_rotation_z REAL,\n light_intensity REAL,\n light_polar_angle REAL,\n light_azimuth_angle REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS general_settings(\n image_id REAL,\n render_type VARCHAR(255),\n render_frame REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n sql_instruction = \"\"\"\n CREATE TABLE IF NOT EXISTS bounding_boxes(\n image_id REAL,\n image_variation REAL,\n object_name VARCHAR(255),\n object_type VARCHAR(255),\n min_x REAL,\n max_x REAL,\n min_y REAL,\n max_y REAL\n );\"\"\"\n pointer.execute(sql_instruction)\n connection.commit()\n connection.close()\n print(\"Creating output database file\")\n except:\n print(\"Was not able to create output database file\")", "def create_database(db_file: str, table_data: List) -> None:\n connection = None\n add_col = []\n\n table_root = '''id INTEGER PRIMARY KEY,\n iteration INTEGER NOT NULL,\n best_local_min TEXT NOT NULL,\n current_epoch INTEGER NOT NULL,\n trades_count INTEGER NOT NULL,\n avg_profit_pct REAL NOT NULL,\n total_profit_currency REAL NOT NULL,\n total_profit_pct REAL NOT NULL,\n avg_duration_minutes REAL NOT NULL,\n loss_func REAL NOT NULL, '''\n\n spaces_col = {'buy': 'buy TEXT NOT NULL',\n 'sell': 'sell TEXT NOT NULL',\n 'roi': 'roi TEXT NOT NULL',\n 'stoploss': 'stoploss TEXT NOT NULL',\n 'trailing': 'trailing TEXT NOT NULL'}\n\n try:\n os.remove(db_file)\n except OSError as err:\n print(err)\n\n try:\n connection = sqlite3.connect(db_file)\n cursor = connection.cursor()\n print(f\"{Fore.MAGENTA}Successfully connected to SQLite DB - {db_file}{Fore.RESET}\")\n\n if 'all' in table_data:\n table_data = ['buy', 'sell', 'roi', 'stoploss', 'trailing']\n elif 'default' in table_data:\n table_data = ['buy', 'sell', 'roi', 'stoploss']\n\n for param in table_data:\n add_col.append(spaces_col[param])\n\n table_root += ', '.join(add_col)\n\n create_hyperopt_data_table = 'CREATE TABLE hyperopt_results (' + table_root + ');'\n\n cursor.execute(create_hyperopt_data_table)\n connection.commit()\n print(f'{Fore.MAGENTA}Table successfully created.{Fore.RESET}')\n\n cursor.close()\n except sqlite3.Error as err:\n print(err)\n finally:\n if connection:\n connection.close()\n print(f'{Fore.MAGENTA}The SQLite connection is closed{Fore.RESET}')", "def write_database(self,data):\n \n if not os.path.exists(self.database):\n output = FileTools.safe_hdf5_open(self.database,'w')\n else:\n output = FileTools.safe_hdf5_open(self.database,'a')\n\n obsid = self.getObsID(data)\n if obsid in output:\n grp = output[obsid]\n else:\n grp = output.create_group(obsid)\n\n grp.attrs['level3_filename'] = self.outfile\n\n if self.name in grp:\n del grp[self.name]\n lvl3 = grp.create_group(self.name)\n\n lvl3.attrs['version'] = __level3_version__\n 
lvl3.attrs['calibrator_obsid'] = self.nearest_calibrator\n lvl3.attrs['calibrator_source'] = self.cal_source\n output.close()", "def convert_csv_to_SQLite3(self,\n csv_path: str=None, # Path to .csv \n destination: str=None, # Where to create .db\n db_name: str=None, # Database name\n table_name: str=None, # table name\n **kwargs # Custom arguments for reader and writter\n ):\n # With scribe reader, read a .csv \n # **kwargs, are used in params in the subclass Scibe_File_Writter\n # **Kwargs Over-write convert_csv_to_db params\n # Inherits from scribe_readers.Scribe_File_Reader\n self.read_from_csv(csv_path, **kwargs) # Inherits from scribe_readers.Scribe_File_Reader\n if db_name != None:\n destination = f\"{destination}\\{db_name}.db\"\n self.db_name = db_name\n conn = self.create_sqlite_connection(destination) # Inherits from scribe_writers_Scribe_Scribe_SQLite_Writer\n # Create connection also creates new db if it does not exist.\n self.create_new_sqlite_table(conn=conn,\n schema=self.dtypes,\n table_name=f\"tbl_{table_name}\",\n close_conn =False)\n \n \"\"\"Insert data into SQLite database\"\"\"\n\n table_name=f\"tbl_{table_name}\"\n self.insert_into_sqlite_table(conn,\n csv_path,\n table_name,\n self.shape,\n self.delimiter)", "def export_db_to_json(self, out_json_file):\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n eodd_utils = eodatadown.eodatadownutils.EODataDownUtils()\n\n query_result = ses.query(EDDSentinel1ASF).all()\n db_scn_dict = dict()\n for scn in query_result:\n db_scn_dict[scn.PID] = dict()\n db_scn_dict[scn.PID]['PID'] = scn.PID\n db_scn_dict[scn.PID]['Scene_ID'] = scn.Scene_ID\n db_scn_dict[scn.PID]['Product_Name'] = scn.Product_Name\n db_scn_dict[scn.PID]['Product_File_ID'] = scn.Product_File_ID\n db_scn_dict[scn.PID]['ABS_Orbit'] = scn.ABS_Orbit\n db_scn_dict[scn.PID]['Rel_Orbit'] = scn.Rel_Orbit\n db_scn_dict[scn.PID]['Doppler'] = scn.Doppler\n db_scn_dict[scn.PID]['Flight_Direction'] = scn.Flight_Direction\n db_scn_dict[scn.PID]['Granule_Name'] = scn.Granule_Name\n db_scn_dict[scn.PID]['Granule_Type'] = scn.Granule_Type\n db_scn_dict[scn.PID]['Incidence_Angle'] = scn.Incidence_Angle\n db_scn_dict[scn.PID]['Look_Direction'] = scn.Look_Direction\n db_scn_dict[scn.PID]['Platform'] = scn.Platform\n db_scn_dict[scn.PID]['Polarization'] = scn.Polarization\n db_scn_dict[scn.PID]['Process_Date'] = eodd_utils.getDateTimeAsString(scn.Process_Date)\n db_scn_dict[scn.PID]['Process_Description'] = scn.Process_Description\n db_scn_dict[scn.PID]['Process_Level'] = scn.Process_Level\n db_scn_dict[scn.PID]['Process_Type'] = scn.Process_Type\n db_scn_dict[scn.PID]['Process_Type_Disp'] = scn.Process_Type_Disp\n db_scn_dict[scn.PID]['Acquisition_Date'] = eodd_utils.getDateTimeAsString(scn.Acquisition_Date)\n db_scn_dict[scn.PID]['Sensor'] = scn.Sensor\n db_scn_dict[scn.PID]['BeginPosition'] = eodd_utils.getDateTimeAsString(scn.BeginPosition)\n db_scn_dict[scn.PID]['EndPosition'] = eodd_utils.getDateTimeAsString(scn.EndPosition)\n db_scn_dict[scn.PID]['North_Lat'] = scn.North_Lat\n db_scn_dict[scn.PID]['South_Lat'] = scn.South_Lat\n db_scn_dict[scn.PID]['East_Lon'] = scn.East_Lon\n db_scn_dict[scn.PID]['West_Lon'] = scn.West_Lon\n db_scn_dict[scn.PID]['Remote_URL'] = scn.Remote_URL\n db_scn_dict[scn.PID]['Remote_FileName'] = scn.Remote_FileName\n db_scn_dict[scn.PID]['Remote_URL_MD5'] = scn.Remote_URL_MD5\n db_scn_dict[scn.PID]['Total_Size'] = scn.Total_Size\n 
db_scn_dict[scn.PID]['Query_Date'] = eodd_utils.getDateTimeAsString(scn.Query_Date)\n db_scn_dict[scn.PID]['Download_Start_Date'] = eodd_utils.getDateTimeAsString(scn.Download_Start_Date)\n db_scn_dict[scn.PID]['Download_End_Date'] = eodd_utils.getDateTimeAsString(scn.Download_End_Date)\n db_scn_dict[scn.PID]['Downloaded'] = scn.Downloaded\n db_scn_dict[scn.PID]['Download_Path'] = scn.Download_Path\n db_scn_dict[scn.PID]['Archived'] = scn.Archived\n db_scn_dict[scn.PID]['ARDProduct_Start_Date'] = eodd_utils.getDateTimeAsString(scn.ARDProduct_Start_Date)\n db_scn_dict[scn.PID]['ARDProduct_End_Date'] = eodd_utils.getDateTimeAsString(scn.ARDProduct_End_Date)\n db_scn_dict[scn.PID]['ARDProduct'] = scn.ARDProduct\n db_scn_dict[scn.PID]['ARDProduct_Path'] = scn.ARDProduct_Path\n db_scn_dict[scn.PID]['DCLoaded_Start_Date'] = eodd_utils.getDateTimeAsString(scn.DCLoaded_Start_Date)\n db_scn_dict[scn.PID]['DCLoaded_End_Date'] = eodd_utils.getDateTimeAsString(scn.DCLoaded_End_Date)\n db_scn_dict[scn.PID]['DCLoaded'] = scn.DCLoaded\n db_scn_dict[scn.PID]['Invalid'] = scn.Invalid\n db_scn_dict[scn.PID]['ExtendedInfo'] = scn.ExtendedInfo\n db_scn_dict[scn.PID]['RegCheck'] = scn.RegCheck\n ses.close()\n\n db_plgin_dict = dict()\n if self.calc_scn_usr_analysis():\n plugin_keys = self.get_usr_analysis_keys()\n for plgin_key in plugin_keys:\n query_result = ses.query(EDDSentinel1ASFPlugins).filter(EDDSentinel1ASFPlugins.PlugInName == plgin_key).all()\n db_plgin_dict[plgin_key] = dict()\n for scn in query_result:\n db_plgin_dict[plgin_key][scn.Scene_PID] = dict()\n db_plgin_dict[plgin_key][scn.Scene_PID]['Scene_PID'] = scn.Scene_PID\n db_plgin_dict[plgin_key][scn.Scene_PID]['PlugInName'] = scn.PlugInName\n db_plgin_dict[plgin_key][scn.Scene_PID]['Start_Date'] = eodd_utils.getDateTimeAsString(\n scn.Start_Date)\n db_plgin_dict[plgin_key][scn.Scene_PID]['End_Date'] = eodd_utils.getDateTimeAsString(scn.End_Date)\n db_plgin_dict[plgin_key][scn.Scene_PID]['Completed'] = scn.Completed\n db_plgin_dict[plgin_key][scn.Scene_PID]['Success'] = scn.Success\n db_plgin_dict[plgin_key][scn.Scene_PID]['Outputs'] = scn.Outputs\n db_plgin_dict[plgin_key][scn.Scene_PID]['Error'] = scn.Error\n db_plgin_dict[plgin_key][scn.Scene_PID]['ExtendedInfo'] = scn.ExtendedInfo\n ses.close()\n\n fnl_out_dict = dict()\n fnl_out_dict['scn_db'] = db_scn_dict\n if db_plgin_dict:\n fnl_out_dict['plgin_db'] = db_plgin_dict\n\n with open(out_json_file, 'w') as outfile:\n json.dump(fnl_out_dict, outfile, indent=4, separators=(',', ': '), ensure_ascii=False)" ]
[ "0.7042345", "0.6815941", "0.67209786", "0.6496771", "0.64334315", "0.6408996", "0.6384497", "0.6370556", "0.63649917", "0.63233453", "0.629178", "0.6223343", "0.61962664", "0.6180101", "0.6169131", "0.61296827", "0.6062206", "0.605488", "0.59702414", "0.59666187", "0.59628606", "0.5919271", "0.5902367", "0.5890637", "0.5879333", "0.5839654", "0.58231", "0.5783758", "0.57727724", "0.5772164" ]
0.76379764
0
This method checks how many experiments are given and creates a key for each experiment, together with its data, in an Experiments dictionary
def exper(self):
    self.dbase.pop('time')  # since we do not want the time data included in our calculation, we drop it out
    ind = list(zip(self.start, self.stop))  # pairs each start row with its stop row; see the Python docs for zip() and list() if this looks unfamiliar :)
    Experiments = {}  # local dictionary that will hold the data of every experiment
    for x in range(self.NrExperiments):
        Experiments["Experiment{0}".format(x)] = []  # these two lines create a key for each experiment; for each experiment we are going to collect mean and std
    # the next passage is a little hard to digest at once
    for i in range(self.NrExperiments):  # we loop n times, n = number of experiments
        for key in sorted(self.dbase.keys()):  # each time we go through every key of the data dictionary
            if len(key) == 2:
                # Check what the key looks like. Remember: the first character of the key is the row the cell is in,
                # the second part is the column it comes from. For example key = '32' means row = 3, column = 2.
                if int(key[0]) in list(range(ind[i][0], ind[i][1])) and key[1] == str(self.col):
                    # Here we check whether the row (key[0]) is in the start-stop range and, at the same time,
                    # whether the column (key[1]) is the searched column. If so, append the cell to the experiment of interest.
                    Experiments["Experiment{0}".format(i)].append(self.dbase[key])
            else:
                if int(key[0]) in list(range(ind[i][0], ind[i][1])) and key[1] + key[2] == str(self.col):
                    # Columns 10, 11 and 12 have keys like '212', which means row = 2, column = '12'.
                    Experiments["Experiment{0}".format(i)].append(self.dbase[key])  # this is the same as above
        self.ListExperiments.append(np.array(Experiments["Experiment{0}".format(i)]))  # we collect all the data for our experiments in the final list 'ListExperiments'
    return self.ListExperiments
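To make the key convention easier to follow, here is a small self-contained sketch with toy values (the numbers and ranges are made up, not taken from the source data) that groups cells the same way: the key encodes row and column, rows between a start and stop index go to one experiment, and only the requested column is kept. Splitting the key as key[0] / key[1:] collapses the two branches of the method above into one.

import numpy as np

# Hypothetical toy data: keys encode "<row><column>", so '32' is row 3, column 2
# and '212' is row 2, column 12 (two-digit columns make the key three characters long).
dbase = {'12': 0.31, '22': 0.35, '32': 0.40, '212': 0.12, '312': 0.15}

start, stop = [1, 3], [3, 4]   # rows 1-2 belong to experiment 0, row 3 to experiment 1
col = 2                        # we only keep cells from column 2
experiments = {i: [] for i in range(len(start))}

for i, (lo, hi) in enumerate(zip(start, stop)):
    for key in sorted(dbase):
        row, column = int(key[0]), key[1:]   # same row/column split as in exper()
        if lo <= row < hi and column == str(col):
            experiments[i].append(dbase[key])

print({i: np.array(v) for i, v in experiments.items()})
# {0: array([0.31, 0.35]), 1: array([0.4])}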
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def experiments(self, key, value):\n experiments = self.get('experiments', [])\n\n name = value.get('e')\n recid = value.get('0')\n record = get_record_ref(recid, 'experiments')\n\n experiments.append({\n 'curated_relation': record is not None,\n 'name': name,\n 'record': record\n })\n\n return experiments", "def create_experiment_dict(algorithm_name, thresholds):\n experiment_dict = {}\n for threshold in thresholds:\n experiment_dict[str(threshold)] = {}\n experiment_dict[str(threshold)][algorithm_name] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n list_dict = {'TP': 0, 'FP': 1, 'TN': 2, 'FN': 3, 'TPR': 4, 'TNR': 5, 'PRECISION': 6, 'RECALL': 7,\n 'F1 MEASURE': 8, 'ACCURACY': 9}\n return experiment_dict, list_dict", "def experiment_fields(self):\n return {\n 'experiment_name': ['experiments', 'hp_combo_history'],\n 'model_struct': ['experiments', 'hp_combo_history'],\n 'loss_function': ['experiments', 'hp_combo_history'],\n 'regularization_type': ['experiments', 'hp_combo_history'],\n 'regularization_strength': ['experiments', 'hp_combo_history'],\n 'optimizer': ['experiments', 'hp_combo_history'],\n 'lr': ['experiments', 'hp_combo_history'],\n 'dataset': ['experiments', 'hp_combo_history'],\n 'regularization_type_domain': ['experiments', 'hp_combo_history'],\n 'regularization_strength_domain': [\n 'experiments', 'hp_combo_history'],\n 'optimizer_domain': ['experiments', 'hp_combo_history'],\n 'lr_domain': ['experiments', 'hp_combo_history'],\n 'timesteps': ['experiments', 'hp_combo_history'],\n 'timesteps_domain': ['experiments', 'hp_combo_history'],\n 'filter_size': ['experiments', 'hp_combo_history'],\n 'filter_size_domain': ['experiments', 'hp_combo_history'],\n 'u_t_domain': ['experiments', 'hp_combo_history'],\n 'q_t_domain': ['experiments', 'hp_combo_history'],\n 't_t_domain': ['experiments', 'hp_combo_history'],\n 'p_t_domain': ['experiments', 'hp_combo_history'],\n 'u_t': ['experiments', 'hp_combo_history'],\n 'q_t': ['experiments', 'hp_combo_history'],\n 't_t': ['experiments', 'hp_combo_history'],\n 'p_t': ['experiments', 'hp_combo_history'],\n 'hp_optim': ['experiments', 'hp_combo_history'],\n 'hp_max_studies': ['experiments', 'hp_combo_history'],\n 'hp_current_iteration': ['experiments', 'hp_combo_history'],\n 'normalize_labels': ['experiments', 'hp_combo_history'],\n 'experiment_iteration': ['experiments', 'hp_combo_history']\n }", "def only_experiments_db(storage, exp_config):\n for exp in exp_config[0]:\n storage.create_experiment(exp)", "def load_experiment_results(discount_factor=0.99, read_log=False, num_episodes=700):\n result = {}\n\n for experiment_dir in list(results_dir.glob('*')):\n # skip yaml and other bs files\n if not experiment_dir.is_dir():\n continue\n\n hydra_dir = experiment_dir / '.hydra'\n config_file = hydra_dir / 'config.yaml'\n\n with open(config_file) as f:\n config = dict(yaml.safe_load(f))\n seed = config.pop('seed')\n config = DictConfig(config)\n\n environment = config.env\n experiment_setting = get_experiment_setting(config)\n \n if config.discount_factor == discount_factor and config.num_episodes == num_episodes:\n if environment not in result:\n result[environment] = {}\n if experiment_setting not in result[environment]:\n result[environment][experiment_setting] = {}\n if config not in result[environment][experiment_setting]:\n result[environment][experiment_setting][config] = {}\n # if same experiment and same seed, only use one of them and skip the rest\n if seed not in result[environment][experiment_setting][config]:\n if not read_log:\n # some 
experiments may still be running, so exp_records not yet written\n try:\n data = pd.read_csv(experiment_dir / 'exp_records.csv', index_col=0)\n except FileNotFoundError:\n continue\n else:\n log_file = experiment_dir / 'dqn.log'\n with open(log_file) as f:\n data = f.read()\n result[environment][experiment_setting][config][seed] = data\n\n return result", "def assigned_exercises(self):\n\n exercises = dict()\n\n with sqlite3.connect(self.db_path) as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select\n e.id ExerciseId,\n e.name,\n s.id,\n s.first_name,\n s.last_name\n from exercises e\n join student_exercises se on se.exercise_id = e.id\n join students s on s.id = se.student_id\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n for row in dataset:\n exercise_id = row[0]\n exercise_name = row[1]\n student_id = row[2]\n student_name = f'{row[3]} {row[4]}'\n \n if exercise_name not in exercises:\n exercises[exercise_name] = [student_name]\n # {\"Kennel\": [\"Brian Cravens\"]}\n else:\n exercises[exercise_name].append(student_name)\n # {\"Kennel\": [\"Brian Cravens\", \"Joe Montana\"]}\n for exercise_name, students in exercises.items():\n print(f'\\n{exercise_name}')\n for student in students:\n print(f'\\t* {student}')", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def create_experiment_if_needed(tr):\n exp = tr.getExperiment(EXPERIMENT_ID)\n if None == exp:\n create_project_if_needed(tr)\n exp = tr.createNewExperiment(EXPERIMENT_ID, 'DEFAULT_EXPERIMENT')\n \n return exp", "def add_experiment(redis, name):\n\n if not ALLOWED_NAMES.match(name):\n raise ExperimentException(name, \"Illegal name\")\n if redis.exists(EXPERIMENT_REDIS_KEY_TEMPLATE % name):\n raise ExperimentException(name, \"Already exists\")\n\n json = dict(creation_date=util.unicode_type(datetime.datetime.now()))\n pipe = redis.pipeline(transaction=True)\n pipe.sadd(ACTIVE_EXPERIMENTS_REDIS_KEY, name)\n pipe.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % name, \"metadata\", escape.json_encode(json))\n pipe.execute()\n return Experiment(redis, name)", "def experiment_measurements_index(fun, num_measurements, sd, num_trials, seed=21):\n experiments = {}\n solutions = {}\n for ns in num_measurements:\n ratios = []\n mud_solutions = []\n for t in range(num_trials):\n np.random.seed(seed+t)\n _r = fun(sd=sd, num_obs=ns)\n ratios.append(_r)\n mud_solutions.append(np.argmax(_r))\n experiments[ns] = ratios\n solutions[ns] = mud_solutions\n \n return experiments, solutions", "def get_experiments_dict(active=True):\n return dict((experiment.name, experiment) for experiment in get_experiments(redis, active=active))", "def collect_experiment_data(self, dedupe=False):\n data = {}\n\n for exp in self.experiments.values():\n expdir = os.path.join(self.datadir, exp.name)\n goals = sorted(\n [goal\n for goal, experiments in self.experiments_by_goal.items()\n if exp in experiments])\n data[exp.name] = {'goals': goals, 'variants': {}}\n\n for variant in exp.variants:\n path = partial(os.path.join, expdir, variant)\n if dedupe:\n trial_identities = get_identities(path('__all__'))\n trialc = len(trial_identities)\n else:\n trialc = count_entries(path('__all__'), dedupe=False)\n data[exp.name]['variants'][variant] = vdata = {'trials': trialc, 'goals': {}}\n\n for goal in goals:\n vdata['goals'][goal] = goaldata = {}\n if dedupe:\n 
conv_identities = trial_identities.intersection(\n get_identities(path(goal)))\n convc = len(conv_identities)\n else:\n convc = count_entries(path(goal), dedupe=False)\n goaldata['conversions'] = convc\n goaldata['rate'] = (float(convc) / trialc\n if trialc\n else float('nan'))\n return data", "def experiments_init(self):\n pass", "def create_experiment_v1(self, skill_id, create_experiment_request, **kwargs):\n # type: (str, CreateExperimentRequest_abced22d, **Any) -> Union[ApiResponse, object, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"create_experiment_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'skill_id' is set\n if ('skill_id' not in params) or (params['skill_id'] is None):\n raise ValueError(\n \"Missing the required parameter `skill_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'create_experiment_request' is set\n if ('create_experiment_request' not in params) or (params['create_experiment_request'] is None):\n raise ValueError(\n \"Missing the required parameter `create_experiment_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/{skillId}/experiments'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'skill_id' in params:\n path_params['skillId'] = params['skill_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'create_experiment_request' in params:\n body_params = params['create_experiment_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=201, message=\"Experiment created. Returns the generated experiment identifier in &#39;Location&#39; header.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. 
Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None", "def experiment_measurements(fun, num_measurements, sd, num_trials, seed=21):\n experiments = {}\n solutions = {}\n for ns in num_measurements:\n discretizations = []\n mud_solutions = []\n for t in range(num_trials):\n np.random.seed(seed+t)\n _d = fun(sd=sd, num_obs=ns)\n discretizations.append(_d)\n mud_solutions.append(_d.mud_point())\n experiments[ns] = discretizations\n solutions[ns] = mud_solutions\n \n return experiments, solutions", "def create_subexperiments(self):\n subexperiments = {}\n for label, df in self.design.groupby(level=0):\n subexperiments[label] = SubExperiment(label, df.loc[label], self.root)\n return subexperiments", "def load_expdict(params, e, expdict, _DEFAULT_VIDDIR):\n _DEFAULT_NPY_DIR = 'npy_volumes'\n exp = params.copy()\n exp = make_paths_safe(exp)\n exp[\"label3d_file\"] = expdict[\"label3d_file\"]\n exp[\"base_exp_folder\"] = os.path.dirname(exp[\"label3d_file\"])\n\n if \"viddir\" not in expdict:\n # if the videos are not at the _DEFAULT_VIDDIR, then it must\n # be specified in the io.yaml experiment portion\n exp[\"viddir\"] = os.path.join(exp[\"base_exp_folder\"], _DEFAULT_VIDDIR)\n else:\n exp[\"viddir\"] = expdict[\"viddir\"]\n print(\"Experiment {} using videos in {}\".format(e, exp[\"viddir\"]))\n\n l3d_camnames = io.load_camnames(expdict[\"label3d_file\"])\n if \"camnames\" in expdict:\n exp[\"camnames\"] = expdict[\"camnames\"]\n elif l3d_camnames is not None:\n exp[\"camnames\"] = l3d_camnames\n print(\"Experiment {} using camnames: {}\".format(e, exp[\"camnames\"]))\n\n # Use the camnames to find the chunks for each video\n chunks = {}\n for name in exp[\"camnames\"]:\n if exp[\"vid_dir_flag\"]:\n camdir = os.path.join(exp[\"viddir\"], name)\n else:\n camdir = os.path.join(exp[\"viddir\"], name)\n intermediate_folder = os.listdir(camdir)\n camdir = os.path.join(camdir, intermediate_folder[0])\n video_files = os.listdir(camdir)\n video_files = [f for f in video_files if \".mp4\" in f]\n video_files = sorted(video_files, key=lambda x: int(x.split(\".\")[0]))\n chunks[str(e) + \"_\" + name] = np.sort(\n [int(x.split(\".\")[0]) for x in video_files]\n )\n exp[\"chunks\"] = chunks\n print(chunks)\n\n # For npy volume training\n if params[\"use_npy\"]:\n exp[\"npy_vol_dir\"] = os.path.join(exp[\"base_exp_folder\"], _DEFAULT_NPY_DIR)\n return exp", "def decode(self):\n # Extract all the experiments\n\n # Map of imageset/scan pairs\n imagesets = {}\n\n # For every experiment, use the given input to create\n # a sensible experiment.\n el = ExperimentList()\n for eobj in self._obj[\"experiment\"]:\n\n # Get the models\n identifier = eobj.get(\"identifier\", \"\")\n beam = self._lookup_model(\"beam\", eobj)\n detector = self._lookup_model(\"detector\", eobj)\n goniometer = 
self._lookup_model(\"goniometer\", eobj)\n scan = self._lookup_model(\"scan\", eobj)\n crystal = self._lookup_model(\"crystal\", eobj)\n profile = self._lookup_model(\"profile\", eobj)\n scaling_model = self._lookup_model(\"scaling_model\", eobj)\n\n key = (eobj.get(\"imageset\"), eobj.get(\"scan\"))\n\n imageset = None\n try:\n imageset = imagesets[key] # type: ImageSet\n except KeyError:\n # This imageset hasn't been loaded yet - create it\n imageset_data = self._lookup_model(\"imageset\", eobj)\n\n # Create the imageset from the input data\n if imageset_data is not None:\n if \"params\" in imageset_data:\n format_kwargs = imageset_data[\"params\"]\n else:\n format_kwargs = {}\n\n # Load the external lookup data\n mask_filename, mask = self._load_pickle_path(imageset_data, \"mask\")\n gain_filename, gain = self._load_pickle_path(imageset_data, \"gain\")\n pedestal_filename, pedestal = self._load_pickle_path(\n imageset_data, \"pedestal\"\n )\n dx_filename, dx = self._load_pickle_path(imageset_data, \"dx\")\n dy_filename, dy = self._load_pickle_path(imageset_data, \"dy\")\n\n if imageset_data[\"__id__\"] == \"ImageSet\":\n imageset = self._make_stills(\n imageset_data, format_kwargs=format_kwargs\n )\n elif imageset_data[\"__id__\"] == \"ImageGrid\":\n imageset = self._make_grid(\n imageset_data, format_kwargs=format_kwargs\n )\n elif (\n imageset_data[\"__id__\"] == \"ImageSequence\"\n or imageset_data[\"__id__\"] == \"ImageSweep\"\n ):\n imageset = self._make_sequence(\n imageset_data,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n format_kwargs=format_kwargs,\n )\n elif imageset_data[\"__id__\"] == \"MemImageSet\":\n imageset = self._make_mem_imageset(imageset_data)\n else:\n raise RuntimeError(\"Unknown imageset type\")\n\n if imageset is not None:\n # Set the external lookup\n if mask is None:\n mask = ImageBool()\n else:\n mask = ImageBool(mask)\n if gain is None:\n gain = ImageDouble()\n else:\n gain = ImageDouble(gain)\n if pedestal is None:\n pedestal = ImageDouble()\n else:\n pedestal = ImageDouble(pedestal)\n if dx is None:\n dx = ImageDouble()\n else:\n dx = ImageDouble(dx)\n if dy is None:\n dy = ImageDouble()\n else:\n dy = ImageDouble(dy)\n\n if not imageset.external_lookup.mask.data.empty():\n if not mask.empty():\n mask = tuple(m.data() for m in mask)\n for m1, m2 in zip(\n mask, imageset.external_lookup.mask.data\n ):\n m1 &= m2.data()\n imageset.external_lookup.mask.data = ImageBool(mask)\n else:\n imageset.external_lookup.mask.data = mask\n imageset.external_lookup.mask.filename = mask_filename\n imageset.external_lookup.gain.data = gain\n imageset.external_lookup.gain.filename = gain_filename\n imageset.external_lookup.pedestal.data = pedestal\n imageset.external_lookup.pedestal.filename = pedestal_filename\n imageset.external_lookup.dx.data = dx\n imageset.external_lookup.dx.filename = dx_filename\n imageset.external_lookup.dy.data = dy\n imageset.external_lookup.dy.filename = dy_filename\n\n # Update the imageset models\n if isinstance(imageset, ImageSequence):\n imageset.set_beam(beam)\n imageset.set_detector(detector)\n imageset.set_goniometer(goniometer)\n imageset.set_scan(scan)\n elif isinstance(imageset, (ImageSet, ImageGrid)):\n for i in range(len(imageset)):\n imageset.set_beam(beam, i)\n imageset.set_detector(detector, i)\n imageset.set_goniometer(goniometer, i)\n imageset.set_scan(scan, i)\n\n imageset.update_detector_px_mm_data()\n\n # Add the imageset to the dict - even if empty - as this will\n # prevent a duplicated attempt 
at reconstruction\n imagesets[key] = imageset\n\n # Append the experiment\n el.append(\n Experiment(\n imageset=imageset,\n beam=beam,\n detector=detector,\n goniometer=goniometer,\n scan=scan,\n crystal=crystal,\n profile=profile,\n scaling_model=scaling_model,\n identifier=identifier,\n )\n )\n\n # Return the experiment list\n return el", "def AcceleratorExperiments(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('accelerator_experiments', default)\n return [HEP.AcceleratorExperimentObject(i) for i in tmp]", "def expid(val,expt_name=None):\n global experiment_name\n if not expt_name:\n assert experiment_name, \"Must set experiment name\"\n expt_name = experiment_name\n return \"{}_{}\".format(expt_name, val)", "def prepare_experiment_data(protein=0, fingerprint=4, n_folds=10, seed=0):\n np.random.seed(seed)\n X, Y = load_svmlight_file(os.path.join(c[\"DATA_DIR\"], \\\n proteins[protein]+\"_\"+fingerprints[fingerprint]+\".libsvm\"))\n\n folds = construct_folds(protein=protein, fingerprint=fingerprint, n_folds=n_folds, seed=seed)\n D = {\"folds\": folds, \"X\":X, \"Y\":Y}, {\"examples\":X.shape[0]}\n\n return D", "def test_create_experiment_hit_no_config(self):\n with OrionState(experiments=[config]) as cfg:\n experiment = create_experiment(config[\"name\"], storage=cfg.storage_config)\n\n assert experiment.name == config[\"name\"]\n assert experiment.version == 1\n assert experiment.space.configuration == config[\"space\"]\n assert experiment.algorithm\n assert experiment.algorithm.configuration == config[\"algorithm\"]\n assert experiment.max_trials == config[\"max_trials\"]\n assert experiment.max_broken == config[\"max_broken\"]\n assert experiment.working_dir == config[\"working_dir\"]", "def instruments():\n instr_dict = {}\n #\n instr_dict['LRISr'] = 2**0\n instr_dict['LRISb'] = 2**1\n instr_dict['Kastb'] = 2**2\n instr_dict['shane_kast_red'] = 2**3\n instr_dict['shane_kast_red_ret'] = 2**3\n instr_dict['DEIMOS'] = 2**4\n instr_dict['NIRSPEC'] = 2**5\n instr_dict['GMOS'] = 2**6\n instr_dict['DBSP'] = 2**7\n #\n return instr_dict", "def load_experiments(filename):\n fp = open(filename)\n experiment_names = None\n try:\n experiment_names = simplejson.load(fp)\n except Exception, e:\n l.error(\"Unable to parse experiment file %s: %s\" % (filename, e))\n raise e\n finally:\n fp.close()\n \n for entry in experiment_names:\n for key in entry.keys():\n if key not in ExperimentLoader.ALLOWED_ATTRIBUTES:\n l.warning(\"Ignoring unrecognized key %s on experiment \"\n \"definition %s in filename %s\" %\n (key, entry, filename))\n if ExperimentLoader.NAME_ATTRIBUTE in entry:\n Experiment.objects.get_or_create(\n name=entry.get(ExperimentLoader.NAME_ATTRIBUTE))\n else:\n l.warning(\"Invalid entry in experiment file %s : %s\" %\n (filename, entry))", "def experiment_params():\n exp = {\n 'lr': [1e-3],\n 'loss_function': ['cce'],\n 'optimizer': ['nadam'],\n 'dataset': [\n # 'curv_contour_length_9',\n 'curv_contour_length_14',\n # 'curv_baseline',\n ]\n }\n exp['data_augmentations'] = [\n [\n 'grayscale',\n 'left_right',\n 'up_down',\n 'uint8_rescale',\n 'singleton',\n 'resize',\n # 'per_image_standardization',\n 'zero_one'\n ]]\n exp['val_augmentations'] = exp['data_augmentations']\n exp['batch_size'] = 32 # Train/val batch size.\n exp['epochs'] = 16\n exp['exp_name'] = 'hgru_bn_pathfinder_14'\n exp['model_name'] = 'hgru'\n # exp['clip_gradients'] = 7.\n exp['save_weights'] = True\n exp['validation_iters'] = 1000\n exp['num_validation_evals'] = 50\n exp['shuffle_val'] = True # 
Shuffle val data.\n exp['shuffle_train'] = True\n return exp", "def run(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n print('Key in run')\n print(config.bigmacc.key)\n i = config.bigmacc.key\n print(i)\n # SCENARIO SETUP ---\n config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)\n print(config.general.project)\n cea.datamanagement.data_initializer.main(config)\n # use the scenario code to set the year for the lca and other operations that need the current year\n pathway_code = config.general.parent\n pathway_items = pathway_code.split('_')\n scenario_year = int(pathway_items[1])\n config.emissions.year_to_calculate = scenario_year\n\n bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)\n\n scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')\n experiment_key = 'exp_{}'.format(i)\n print(experiment_key)\n keys = [int(x) for x in str(i)]\n if experiment_key in scen_check['Experiments'].values.tolist():\n print('Experiment was finished previously, moving to next.')\n pass\n else:\n print('START: experiment {}.'.format(i))\n\n # INITIALIZE TIMER ---\n t0 = time.perf_counter()\n if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):\n print(' - Folder exists for experiment {}.'.format(i))\n else:\n os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))\n print(' - Folder does not exist for experiment {}, creating now.'.format(i))\n\n # run the archetype mapper to leverage the newly loaded typology file and set parameters\n print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))\n cea.datamanagement.archetypes_mapper.main(config)\n\n # run the rule checker to set the scenario parameters\n print(' - Running rule checker for experiment {}.'.format(i))\n cea.bigmacc.bigmacc_rules.main(config)\n\n # SIMULATIONS ---\n\n print(' - Run radiation is {}.'.format(config.bigmacc.runrad))\n print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))\n # checking on need for radiation simulation\n\n if config.bigmacc.runrad == True:\n # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation\n if config.bigmacc.rerun != True:\n print(' - Running radiation simulation for experiment {}.'.format(i))\n if os.path.exists(locator.get_radiation_building('B000')):\n print(' - Radiation folder exists for experiment {}, copying.'.format(i))\n else:\n print(' - Radiation running for experiment {}.'.format(i))\n cea.resources.radiation_daysim.radiation_main.main(config)\n else:\n # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))\n old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'solar-radiation')\n # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())\n else:\n radfiles = config.bigmacc.copyrad\n # print(' - Copying radiation results from {}.'.format(radfiles))\n # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())\n print(' - Experiment {} does not require new radiation simulation.'.format(i))\n\n # running demand forecasting\n if os.path.exists(locator.get_schedule_model_file('B000')):\n print(' - Schedules exist for experiment {}.'.format(i))\n else:\n print(' - Schedule maker running for experiment 
{}.'.format(i))\n schedule_maker.main(config)\n\n # check to see if we need to rerun demand or if we can copy\n if config.bigmacc.rerun != True:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n if keys[0] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n elif keys[6] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n cea.demand.demand_main.main(config)\n # print(' - Looking for demand results data from previous run for experiment {}.'.format(i))\n # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n # config.general.scenario_name, 'outputs', 'data', 'demand')\n # if os.path.exists(old_demand_files):\n # # print(' - Copy demand results files from previous run of experiment {}.'.format(i))\n # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())\n # pass\n # else:\n # print(' - No results found.')\n # print(' - Running demand simulation for experiment {}.'.format(i))\n # cea.demand.demand_main.main(config)\n\n if config.bigmacc.pv == True:\n print(' - Run PV is {}.'.format(config.bigmacc.pv))\n if config.bigmacc.rerun == True:\n print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))\n old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')\n if os.path.exists(old_pv_files):\n # print(' - Copying PV files from previous run of experiment {}.'.format(i))\n # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())\n pass\n else:\n print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n else:\n # if PV simulation is needed, run it.\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n\n print('Run water-body exchange is {}.'.format(config.bigmacc.water))\n # if water-body simulation is needed, run it.\n if config.bigmacc.water == True:\n print(' - Running water body simulation for experiment {}.'.format(i))\n water.main(config)\n\n # recalculating the supply split between grid and ng in the websrook DH\n if keys[4] == 1:\n print(' - Do not run district heat recalculation.')\n else:\n print(' - Run district heat recalculation.')\n cea.bigmacc.wesbrook_DH.main(config)\n\n if keys[7] == 1:\n print(' - PV use detected. 
Adding PV generation to demand files.')\n util.write_pv_to_demand(config)\n else:\n print(' - No PV use detected.')\n\n # running the emissions and costing calculations\n print(' - Run cost and emissions scripts.')\n cea.analysis.costs.system_costs.main(config)\n cea.analysis.lca.main.main(config)\n\n # clone out the simulation inputs and outputs directory\n print(' - Transferring results directory for experiment {}.'.format(i))\n\n new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'inputs')\n new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data')\n\n if config.bigmacc.rerun != True:\n distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)\n distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)\n\n time_elapsed = time.perf_counter() - t0\n\n # save log information\n log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),\n index_col='Unnamed: 0')\n log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),\n 'Completed': 'True',\n 'Experiment Time': '%d.2 seconds' % time_elapsed,\n 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)\n log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))\n log_df.to_csv(r\"C:\\Users\\justi\\Desktop\\126logger_backup.csv\", )\n\n # write netcdf of hourly_results\n netcdf_writer.main(config, time='hourly')\n\n if config.bigmacc.rerun != True:\n shutil.rmtree(locator.get_costs_folder())\n shutil.rmtree(locator.get_demand_results_folder())\n shutil.rmtree(locator.get_lca_emissions_results_folder())\n shutil.rmtree(locator.get_solar_radiation_folder())\n shutil.rmtree(locator.get_potentials_folder())\n else:\n print(' - Rerun does not require purging of the files.')\n\n # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here\n if keys[0] == 1:\n cea.datamanagement.data_initializer.main(config)\n else:\n pass\n print('END: experiment {}. \\n'.format(i))", "def three_experiments(two_experiments, one_experiment):", "def createDictBase(self):\n #allFiles = glob.glob(self.path + \"/*\"+ self.filetype)\n #data = pd.read_excel(allFiles[0])\n#================================================================================================================== \n# self.list_files = self.Files_to_import()\n# data=pd.read_excel(self.path +'/'+self.list_files[0]) # importing the first excel sheet from the first/zero time point\n self.list_files = self.Files_to_import()\n try:\n tim = pd.read_excel(self.path +'/timePoints' + self.filetype) # importin the time points from a shhet called time_points\n time = np.array(tim['time']) # assigning variable time conataing an array with the timepoints\n self.nr_files = len(time)\n except:\n time = np.array(list(range(self.nr_files))) \n \n data=pd.read_excel(self.path +'/'+self.list_files[0])\n \n data=np.array(data) # converts it to array, so we can manipualte the data easier\n #python wants for some reason first to create the dictionary with at least on value before we can run it in a loop. THat is why we have litle redundancy, since the next part is allmost the same.\n for i in range(len(data)): # the numbers of rows. Goes through the rows\n for ii in range(len(data[i])): # the numbers of columns. 
For every row goes through the columns\n cell_id=str(i)+str(ii) # we create a variable that has a value cell_id= rowNUm colNUm, for example x= '34' means row 3 column 4\n dat=[] # a list that will contain the first value of the cell. It will be cleaned every time the loop runs the newxt value\n dat.append(data[i][ii]) # we put the value of the well to the list\n self.dbase[cell_id]=dat # the list is put to the table. For example dabse['cell_id']= some OD value \n \n # then we go through the rest of the excell time points and collect them\n for i in range(1,len(time)): \n if self.list_files[i] != 0:\n \n #data = pd.read_excel(allFiles[i])\n data=pd.read_excel(self.path +'/'+ self.list_files[i]) \n data=np.array(data)\n for i in range(len(data)): # the numbers of rows. Goes through the rows\n for ii in range(len(data[i])): # the numbers of columns. For every row goes through the columns\n cell_id=str(i)+str(ii) # we create a variable that has a value cell_id= rowNUm colNUm, for example x= '34' means row 3 column 4\n \n tempVar=self.dbase[cell_id] # here we use a method of exchanging variables to be able to uppdate the cloumn corresponding to the cell_id\n tempVar.append(data[i][ii]) # add the new data to the copy\n self.dbase[cell_id] = tempVar # uppdate the original dictionary\n else:\n pass\n self.dbase['time'] = time # at theend we add a column that takes care of the time_points \n return self.dbase", "def gather_experiment_parameters(self):\n consts = win32com.client.constants.__dicts__[0]\n exp_params = [r for r in consts.keys() if len(r.split(\"EXP_\")) > 1]\n dm_params = [r for r in consts.keys() if len(r.split(\"DM_\")) > 1]\n self.app_param = {} \n self.appdoc_param = {} \n for p in exp_params:\n self.app_param.update({p:self.app.GetParam(consts[p])})\n\n for p in dm_params:\n #self.appdoc_param.update({p:self.app.GetParam(consts[p])}) bug? call appdoc? 
CP\n\n self.appdoc_param.update({p:self.app.GetParam(consts[p])})", "def populate_db(self, namedict, experiment_link=False):\n namedict = self.fix_namedict(namedict, 'experiments')\n if not experiment_link:\n self.cur.executemany(\n \"\"\"\n INSERT INTO experiments\n (\n experiment_name,\n model_struct,\n loss_function,\n regularization_type,\n regularization_strength,\n optimizer,\n lr,\n dataset,\n regularization_type_domain,\n regularization_strength_domain,\n optimizer_domain,\n lr_domain,\n timesteps,\n timesteps_domain,\n u_t_domain,\n q_t_domain,\n t_t_domain,\n p_t_domain,\n u_t,\n q_t,\n t_t,\n p_t,\n hp_optim,\n hp_max_studies,\n hp_current_iteration,\n experiment_iteration,\n normalize_labels,\n filter_size,\n filter_size_domain\n )\n VALUES\n (\n %(experiment_name)s,\n %(model_struct)s,\n %(loss_function)s,\n %(regularization_type)s,\n %(regularization_strength)s,\n %(optimizer)s,\n %(lr)s,\n %(dataset)s,\n %(regularization_type_domain)s,\n %(regularization_strength_domain)s,\n %(optimizer_domain)s,\n %(lr_domain)s,\n %(timesteps)s,\n %(timesteps_domain)s,\n %(u_t_domain)s,\n %(q_t_domain)s,\n %(t_t_domain)s,\n %(p_t_domain)s,\n %(u_t)s,\n %(q_t)s,\n %(t_t)s,\n %(p_t)s,\n %(hp_optim)s,\n %(hp_max_studies)s,\n %(hp_current_iteration)s,\n %(experiment_iteration)s,\n %(normalize_labels)s,\n %(filter_size)s,\n %(filter_size_domain)s\n )\n \"\"\",\n namedict)\n self.cur.execute(\n \"\"\"\n UPDATE experiments\n SET experiment_link=_id\n WHERE experiment_name=%(experiment_name)s\n \"\"\",\n namedict[0])\n else:\n self.cur.executemany(\n \"\"\"\n INSERT INTO experiments\n (\n experiment_name,\n model_struct,\n loss_function,\n regularization_type,\n regularization_strength,\n optimizer,\n lr,\n dataset,\n regularization_type_domain,\n regularization_strength_domain,\n optimizer_domain,\n lr_domain,\n timesteps,\n timesteps_domain,\n u_t_domain,\n q_t_domain,\n t_t_domain,\n p_t_domain,\n u_t,\n q_t,\n t_t,\n p_t,\n hp_optim,\n hp_max_studies,\n hp_current_iteration,\n experiment_iteration,\n normalize_labels,\n filter_size,\n filter_size_domain,\n experiment_link\n )\n VALUES\n (\n %(experiment_name)s,\n %(model_struct)s,\n %(loss_function)s,\n %(regularization_type)s,\n %(regularization_strength)s,\n %(optimizer)s,\n %(lr)s,\n %(dataset)s,\n %(regularization_type_domain)s,\n %(regularization_strength_domain)s,\n %(optimizer_domain)s,\n %(lr_domain)s,\n %(timesteps)s,\n %(timesteps_domain)s,\n %(u_t_domain)s,\n %(q_t_domain)s,\n %(t_t_domain)s,\n %(p_t_domain)s,\n %(u_t)s,\n %(q_t)s,\n %(t_t)s,\n %(p_t)s,\n %(hp_optim)s,\n %(hp_max_studies)s,\n %(hp_current_iteration)s,\n %(experiment_iteration)s,\n %(normalize_labels)s,\n %(filter_size)s,\n %(filter_size_domain)s,\n %(experiment_link)s\n )\n \"\"\",\n namedict)\n if self.status_message:\n self.return_status('INSERT')" ]
[ "0.67150646", "0.58456343", "0.57081664", "0.554345", "0.5494117", "0.548862", "0.5465825", "0.5458093", "0.5454368", "0.545273", "0.5450147", "0.5429693", "0.54195243", "0.5378443", "0.5368457", "0.5343318", "0.53421944", "0.5336246", "0.5334241", "0.53309494", "0.53131723", "0.5309926", "0.52775615", "0.52684265", "0.525254", "0.5241784", "0.5235696", "0.5229722", "0.52004635", "0.51939106" ]
0.59251714
1
This method combines the previous two methods, exper and ReplicaStats, and returns a list with the means and stds for each and every replicate
def Means_Stds(self):
    self.means = []  # list taking care of the means of all experiments
    self.stds = []   # list taking care of the Stds of all experiments
    for replica in self.exper():  # remember: self.exper, from above, returns ListExperiments
        mean, Std = self._ReplicaStats(replica.T)  # calculates the means and Stds; we have to transpose the matrix (.T stands for transpose)
        self.means.append(mean)  # the calculated data for each experiment is gathered in one place
        self.stds.append(Std)
    # print(self.means, self.stds)
    return self.means, self.stds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ReplicaStats(self, myreplica):\n \n means=[None]*len(myreplica) # creating an empty list for the means with the length of my timepoints indexes\n std=[None]*len(myreplica) # creating an empty list for the std\n for i in range(len(myreplica)):\n means[i]=np.mean(myreplica[i]) # numpy is calculating the means and std for every row and then add it to the list\n std[i]=np.std(myreplica[i])\n #print(means, std)\n return means, std", "def get_summarized_results(self):\n stats = [v.stats() for (k, v) in self.examples.items() if v.is_ready()]\n res = self.ExampleClass.average_stats(stats)\n\n res['loss'] = self.loss/self.loss_cnt\n res['recent_loss'] = sum(self.recent_loss_array) / sum(self.recent_loss_bs_array)\n\n return res", "def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]", "def _loss_std_mean(self, iterations):\n\n loss_array = np.array(self._loss_list[-iterations:])\n return loss_array.mean(), loss_array.std()", "def dist_stats(self,nn_list):\n\n nn_list = np.array(nn_list)\n d = self.dist[:,nn_list-1]\n\n mean = np.mean(d,axis=0)\n std = np.std(d,axis=0)\n\n return mean, std", "def _get_normalisation_stats(self):\n p_net_datasets = [self.pdf_dataset] + [self.PDE_dataset] + [self.BC_dataset]\n p_net_means, p_net_stds = get_mean_std_from_datasets(p_net_datasets)\n\n D_net_datasets = [self.PDE_dataset]\n D_net_means, D_net_stds = get_mean_std_from_datasets(D_net_datasets)\n\n U_net_datasets = [self.PDE_dataset]\n U_net_means, U_net_stds = get_mean_std_from_datasets(U_net_datasets)\n\n return p_net_means, p_net_stds, D_net_means, D_net_stds, U_net_means, U_net_stds", "def dist_stats(self,nn_list):\n nn_list = np.array(nn_list)-1\n d = self.dist[:,nn_list]\n\n mean = np.mean(d,axis=0)\n std = np.std(d,axis=0)\n\n return mean, std", "def compute_training_stats():\n means, stds = [], []\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n print(i)\n img, _ = data[i]\n std, mean = t.std_mean(input=img, dim=(1, 2))\n means.append(mean)\n stds.append(std)\n means = t.sum(t.vstack(means), dim=0) / len(means)\n stds = t.sum(t.vstack(stds), dim=0) / len(stds)\n print(means, stds)", "def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )", "def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, .5, .5]\n stds = [1, 1, 1]\n # for key in 
opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize", "def exper(self):\n self.dbase.pop('time') # since we do not want the time data to be included in our calculation we drop it out.\n ind=list(zip(self.start, self.stop)) # here I recomend to Google; 'zip , list python' to understand what is going on :)\n Experiments={} # assigning a local dictionary variable\n for x in range(self.NrExperiments):\n Experiments[\"Experiment{0}\".format(x)]=[] # this two lines creates keys for each experment. For each experiment we are going to collect mean and Std\n \n # next passage is a little bit harsh to digest att once \n for i in range(self.NrExperiments): # we are looping n-times n=number of experiments\n for key in sorted(self.dbase.keys()): # every time we are going through each key of the dictionary with the data\n if len(key) == 2: # we check how the key looks like . If you remmember the first number of the key correspons to the row at which cells is, and \n # the second part of the key corresponds to the column the cell is comming from . For example key = '32' tells you row = 3 , column = 2\n if int(key[0]) in list(range(ind[i][0],ind[i][1])) and key[1]==str(self.col): # here we check if the first number of the key (key[0])is in the range of stat-stop row and att the same time \n # att which column key[1]. If it is in the searched column and rows we append it to the expriment of interest\n Experiments[\"Experiment{0}\".format(i)].append(self.dbase[key])\n else:\n if int(key[0]) in list(range(ind[i][0],ind[i][1])) and key[1]+key[2] ==str(self.col): # WE have columns 10, 11, 12 wich have key like for ex. 
key = '212' , which tells you row= 2, column = '12'\n Experiments[\"Experiment{0}\".format(i)].append(self.dbase[key]) # this is the same as above\n \n self.ListExperiments.append(np.array(Experiments[\"Experiment{0}\".format(i)])) # we collect at the end all data for our experiments in a final list 'ListExperiments'\n return self.ListExperiments", "def get_data_set_mean_and_std(self):\n cnt = 0\n fst_moment = torch.empty(3)\n snd_moment = torch.empty(3)\n\n for idx in range(self.__len__()):\n outputs = self.__getitem__(idx)\n\n # Outputs = img, label (BIPED Dataset)\n # Outputs = img_with_end_dots, classification_label, single_contour_with_end_dots\n img = outputs[0]\n\n c, h, w = img.shape\n nb_pixels = h * w\n sum_ = torch.sum(img, dim=[1, 2])\n sum_of_square = torch.sum(img ** 2, dim=[1, 2])\n fst_moment = (cnt * fst_moment + sum_) / (cnt + nb_pixels)\n snd_moment = (cnt * snd_moment + sum_of_square) / (cnt + nb_pixels)\n\n cnt += nb_pixels\n\n return fst_moment, torch.sqrt(snd_moment - fst_moment ** 2)", "def get_data_stats(sharded_list, center_at_mut=True):\n data = []\n all_elements = []\n labels = []\n\n for i, sharded in enumerate(sharded_list):\n for shard_num, shard_df in sharded.iter_shards():\n labels_df = sharded.read_shard(shard_num, key='labels')\n\n for ensemble_name, ensemble_df in shard_df.groupby(['ensemble']):\n all_elements.extend(ensemble_df.element.values)\n label_info = labels_df[labels_df.ensemble == ensemble_name].squeeze()\n\n for subunit_name in ['original', 'mutated']:\n struct_df = ensemble_df[ensemble_df.subunit == subunit_name]\n pos = struct_df[['x', 'y', 'z']].astype(np.float32)\n mutation_center = __get_mutation_center(\n struct_df, label_info, center_at_mut)\n\n max_dist = util.get_max_distance_from_center(pos, mutation_center)\n num_atoms = struct_df.shape[0]\n data.append((ensemble_name, subunit_name, max_dist, num_atoms))\n\n labels.append((i, shard_num, label_info.label))\n\n all_elements_df = pd.DataFrame(all_elements, columns=['element'])\n unique_elements = all_elements_df.element.unique()\n print('Unique elements ({:}): {:}'.format(len(unique_elements), unique_elements))\n print('\\nElement counts:')\n print(all_elements_df.element.value_counts())\n print('\\n')\n\n all_labels_df = pd.DataFrame(labels, columns=['sharded', 'shard_num', 'label'])\n print('\\nLabel by dataset:')\n print(all_labels_df.groupby(['sharded', 'shard_num']).label.value_counts())\n print('\\n')\n print(all_labels_df.label.value_counts())\n\n df = pd.DataFrame(data, columns=['ensemble', 'subunit', 'max_dist', 'num_atoms'])\n df = df.sort_values(by=['max_dist', 'num_atoms'],\n ascending=[False, False]).reset_index(drop=True)\n print(df.describe())\n\n print(df[df.max_dist < 50].shape[0]*100.0/df.shape[0])\n return df", "def meanTest(li_pre_final,li_post_final): \r\n li_add_A_pre = li_pre_final[0][0]\r\n li_add_B_pre = li_pre_final[0][1] \r\n li_add_C_pre = li_pre_final[0][2] \r\n li_add_D_pre = li_pre_final[0][3]\r\n\r\n li_upd_A_pre = li_pre_final[1][0] \r\n li_upd_B_pre = li_pre_final[1][1]\r\n li_upd_C_pre = li_pre_final[1][2]\r\n li_upd_D_pre = li_pre_final[1][3]\r\n\r\n li_rem_A_pre = li_pre_final[2][0] \r\n li_rem_B_pre = li_pre_final[2][1]\r\n li_rem_C_pre = li_pre_final[2][2]\r\n li_rem_D_pre = li_pre_final[2][3]\r\n\r\n li_add_A_post = li_post_final[0][0]\r\n li_add_B_post = li_post_final[0][1] \r\n li_add_C_post = li_post_final[0][2] \r\n li_add_D_post = li_post_final[0][3] \r\n\r\n li_upd_A_post = li_post_final[1][0] \r\n li_upd_B_post = li_post_final[1][1]\r\n 
li_upd_C_post = li_post_final[1][2]\r\n li_upd_D_post = li_post_final[1][3] \r\n\r\n li_rem_A_post = li_post_final[2][0] \r\n li_rem_B_post = li_post_final[2][1]\r\n li_rem_C_post = li_post_final[2][2]\r\n li_rem_D_post = li_post_final[2][3] \r\n\r\n li_p_values = [i for i in range(12)]\r\n \r\n U1, li_p_values[0] = mannwhitneyu(li_add_A_pre,li_add_A_post)\r\n U1, li_p_values[1] = mannwhitneyu(li_add_B_pre,li_add_B_post) \r\n U1, li_p_values[2] = mannwhitneyu(li_add_C_pre,li_add_C_post)\r\n U1, li_p_values[3] = mannwhitneyu(li_add_D_pre,li_add_D_post)\r\n\r\n U1, li_p_values[4] = mannwhitneyu(li_upd_A_pre,li_upd_A_post)\r\n U1, li_p_values[5] = mannwhitneyu(li_upd_B_pre,li_upd_B_post)\r\n U1, li_p_values[6] = mannwhitneyu(li_upd_C_pre,li_upd_C_post)\r\n U1, li_p_values[7] = mannwhitneyu(li_upd_D_pre,li_upd_D_post)\r\n\r\n U1, li_p_values[8] = mannwhitneyu(li_rem_A_pre,li_rem_A_post)\r\n U1, li_p_values[9] = mannwhitneyu(li_rem_B_pre,li_rem_B_post)\r\n U1, li_p_values[10] = mannwhitneyu(li_rem_C_pre,li_rem_C_post)\r\n U1, li_p_values[11] = mannwhitneyu(li_rem_D_pre,li_rem_D_post)\r\n\r\n for p_value in li_p_values:\r\n if p_value < 0.05:\r\n print(\"reject H0: statistically different\")\r\n else:\r\n print(\"accept H0: not statistically different\")", "def mean_STD(self,counter):\n \n \n pass", "def _get_aggregated_mean_std(self, means, stds, n):\n mean = means.view(n, -1).sum(0) / n\n std = (\n stds.view(n, -1).sum(0) / n\n + ((means.view(n, -1) - mean) ** 2).view(n, -1).sum(0) / n\n )\n return mean.detach(), std.detach()", "def _get_tads_mean_std(self, experiments):\n norm_tads = []\n for tad in experiments:\n for brk in self.experiments[tad]['tads'].values():\n if not brk['brk']:\n continue\n norm_tads.append(log((brk['end'] - brk['start']) * self.resolution))\n length = len(norm_tads)\n mean = sum(norm_tads)/length\n std = sqrt(sum([(t-mean)**2 for t in norm_tads])/length)\n return mean, std", "def experiment_equipment(fun, num_measure, sd_vals, num_trials, reference_value):\n sd_err = []\n sd_var = []\n for sd in sd_vals:\n temp_err = []\n for t in range(num_trials):\n d = fun(sd=sd, num_obs=num_measure)\n mud_point = d.mud_point()\n temp_err.append(np.linalg.norm(mud_point - reference_value))\n sd_err.append(np.mean(temp_err))\n sd_var.append(np.var(temp_err))\n\n return sd_err, sd_var", "def results_psavg_sims():\n posterior_means = [[1.18040327516, 7.55106444832, 3.27420103073, 3.51998795534, 0.67212630002],\n [0.619197296326, 6.49420626987, 2.22495505139, 2.27682390376, 0.678172183554],\n [0.856628471666, 5.94732402905, 3.97580346111, 3.85788708662, 0.690090617623],\n [0.774906025167, 7.34275742443, 2.69729821931, 2.97994334746, 0.663015258594]]\n\n\n sgr1900_results.results_psavg_sims(posterior_means, [5,6,8,12], \"sgr1806\")\n\n return", "def compute_stats(self, dataset, portion):\n with torch.no_grad():\n specgrams = []\n samples = 5000\n for i_batch, (mix, _, _) in enumerate(dataset):\n mix = mix[portion]\n spec = self.calculate_mag(mix, db_conversion=True)\n specgrams.append(spec)\n if (i_batch + 1) * mix.shape[0] > samples:\n break\n specgrams = torch.cat(specgrams, 0)\n self.mean.data = specgrams.mean(dim=(0, 2), keepdim=True)\n self.std.data = specgrams.std(dim=(0, 2), keepdim=True)\n None", "def analyze_results(results): #, result_nonprivate):\n res_dimensions = zip(*results)\n mean, std = [], []\n \n for resdim in res_dimensions:\n mean.append ( numpy.average(resdim) )\n std.append ( numpy.std(resdim) )\n\n return mean, std", "def divide_by_std_across_trials(self):\n if 
not hasattr(self, 'mean_across_trials_subtracted_data'):\n self.subtract_mean_across_trials()\n self.std_across_trials_divided_data = \\\n self.mean_across_trials_subtracted_data / \\\n np.std(self.mean_across_trials_subtracted_data,\n axis=1, keepdims=True)", "def mean_std_calc(dataloader):\n mean = 0\n std = 0\n samples = 0\n for data, _, _ in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n samples += batch_samples\n\n return (mean / samples),(std / samples)", "def get_mean(self):\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i]))", "def get_mean_and_std(dataset):\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, shuffle=True, num_workers=2\n )\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print(\"==> Computing mean and std..\")\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:, i, :, :].mean()\n std[i] += inputs[:, i, :, :].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std", "def theoretical_stats_selectivity(self) -> np.ndarray:\n warn('This method will likely be phased out', category=FutureWarning)\n grand_final = []\n all_of_it = []\n for elt in self.final_comb_table:\n for elt2 in self.mean_and_sd_dic.keys():\n if str(elt[:self.mutation_number]) == str(elt2):\n elt = np.append(elt, list(self.mean_and_sd_dic[elt2]))\n for elt3 in self.combs_only:\n if np.array_equal(elt[len(self.mutations_list)], elt3) == True:\n theor_mean = np.array([0])\n replicate_values = np.zeros((1, len(self.replicate_matrix[0])))\n for elt4 in elt3:\n target = self.mean_and_sd_array[elt4 - 1][0]\n theor_mean = np.add(theor_mean, target)\n target2 = self.replicate_matrix[elt4 - 1]\n replicate_values = np.add(replicate_values, target2)\n theor_sd = (np.std(replicate_values)) / math.sqrt(self.replicate_number)\n elt = np.append(elt, list(theor_mean))\n elt = np.append(elt, theor_sd)\n grand_final.append(elt)\n if self.verbose:\n print('mutationlist', self.mutations_list)\n print('grand_final', grand_final)\n for elt5 in grand_final:\n at_last = (elt5[len(self.mutations_list) + 1:][0]) - (elt5[len(self.mutations_list) + 1:][2])\n elt5 = np.append(elt5, at_last)\n all_of_it.append(elt5)\n return np.array(all_of_it)", "def aggregate_results(scores, stds):\n\n scores = pd.DataFrame(scores.values(),\n index=scores.keys(),\n columns=[\"SCORE MEAN\", ])\n stds = pd.DataFrame(stds.values(),\n index=stds.keys(),\n columns=[\"SCORE STD\", ])\n scores.index.name = param_names\n stds.index.name = param_names\n data = []\n for param_gropby_levels in ((0,), (1,), (2,), (3, 4), (5,)):\n aggregate_scores = scores.groupby(level=param_gropby_levels).mean()\n aggregate_stds = scores.groupby(level=param_gropby_levels).std()\n data.append((aggregate_scores, aggregate_stds))\n return data", "def _get_mean_and_log_std(self, *inputs):\n return self._shared_mean_log_std_network(*inputs)", "def compute_analysis(self):\r\n def get_mean(self):\r\n \"\"\"\r\n Compute mean in all sensors\r\n \"\"\"\r\n for i in range(1,len(self.data[0])):\r\n self.prom.append(np.mean(self.data[:,i])) \r\n\r\n \r\n def get_stddev(self):\r\n \"\"\"\r\n Compute mean in all sensors\r\n \"\"\"\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i])) \r\n \r\n # Get the values\r\n get_mean(self)\r\n get_stddev(self)\r\n \r\n # Check condition\r\n [(self.out_of_3stddev.append(i)) \r\n for (i) in (self.data[:,0:4]) 
\r\n if (any(\r\n (i[1:4] > 3*np.array(self.stddev)+np.array(self.prom))|\r\n (i[1:4] < -3*np.array(self.stddev)+np.array(self.prom))\r\n ))]", "def get_mean_and_std(dataloader):\n mean = torch.zeros(3)\n std = torch.zeros(3)\n len_dataset = 0\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n len_dataset += 1\n for i in range(len(inputs[0])):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len_dataset)\n std.div_(len_dataset)\n return mean, std" ]
[ "0.7879327", "0.6298081", "0.61253333", "0.6120532", "0.6087231", "0.60323876", "0.60249436", "0.6018178", "0.5919807", "0.591294", "0.58573043", "0.585431", "0.58437914", "0.57945544", "0.57920843", "0.57894504", "0.5778291", "0.57698476", "0.5765662", "0.5676729", "0.5674467", "0.56688666", "0.56465846", "0.56445754", "0.56376606", "0.5636925", "0.5604046", "0.5589746", "0.5583321", "0.55734247" ]
0.8113174
0
On fit, a distribution is created for each column, along with the covariance and means
def test_fit_default_distribution(self):
    copula = GaussianMultivariate(GaussianUnivariate)
    copula.fit(self.data)

    for i, key in enumerate(self.data.columns):
        assert copula.columns[i] == key
        assert copula.univariates[i].__class__ == GaussianUnivariate
        assert copula.univariates[i]._params['loc'] == self.data[key].mean()
        assert copula.univariates[i]._params['scale'] == np.std(self.data[key])

    expected_covariance = copula._get_covariance(self.data)
    assert (copula.covariance == expected_covariance).all().all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, df):\n self.df_std = df.std(axis=0, skipna=True)\n self.df_mean = df.mean(axis=0, skipna=True)\n return self", "def fit ( self, X ):\n \n if self.mean:\n self.df_means = X.mean ( axis = 0 ) # Get the colwise means\n if self.std:\n self.df_std = X.std ( axis = 0 ) # Get the colwise stds", "def test_fit_distribution_arg(self):\n # Setup\n distribution = 'copulas.univariate.gaussian_kde.GaussianKDE'\n copula = GaussianMultivariate(distribution=distribution)\n\n # Run\n copula.fit(self.data)\n\n # Check\n assert copula.distribution == 'copulas.univariate.gaussian_kde.GaussianKDE'\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert get_qualified_name(copula.univariates[i].__class__) == copula.distribution\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()", "def test_fit_distribution_selector(self):\n copula = GaussianMultivariate(distribution={\n 'column1': 'copulas.univariate.beta.BetaUnivariate',\n 'column2': 'copulas.univariate.gaussian_kde.GaussianKDE',\n })\n copula.fit(self.data)\n\n assert get_qualified_name(\n copula.univariates[0].__class__) == 'copulas.univariate.beta.BetaUnivariate'\n assert get_qualified_name(\n copula.univariates[1].__class__) == 'copulas.univariate.gaussian_kde.GaussianKDE'\n assert get_qualified_name(\n copula.univariates[2].__class__) == 'copulas.univariate.base.Univariate'", "def __call__(self, mean=None, cov=1):\r\n return multivariate_normal_frozen(mean, cov)", "def fit(self, data: Optional[pd.DataFrame] = None, ndims: Optional[int] = None,\n columns: Optional[List[str]] = None,\n categorical_columns=(), verbose=False):\n\n assert ndims is not None or data is not None or self.ndims is not None or columns is not None\n\n if data is not None:\n ndims = len(data.columns)\n self.column_order = list(data.columns)\n\n if columns is not None:\n self.column_order = columns\n ndims = len(columns)\n\n if ndims is None:\n ndims = self.ndims\n\n self.mu = np.random.randn(ndims).astype(np.float32)\n self.cov = np.eye(ndims) * np.abs(\n np.random.randn(ndims).reshape(-1, 1)\n ).astype(np.float32)\n self.ndims = ndims", "def fit(self, X: np.ndarray, y: np.ndarray):\n\n # For each feature column index in X\n for col_idx in range(X.shape[1]):\n if col_idx not in self.column_distribution_map:\n raise ValueError(f\"No distribution given for column {col_idx}\")\n\n # If the column has a multinomial tag, fit a multinomial.\n if self.column_distribution_map[col_idx] == \"multinomial\":\n self.fitted_distributions[col_idx] = self._fit_multinomial(\n X=X, col_idx=col_idx, y=y\n )\n # Otherwise fit a Gaussian\n elif self.column_distribution_map[col_idx] == \"gaussian\":\n self.fitted_distributions[col_idx] = self._fit_gaussian(\n X=X, col_idx=col_idx, y=y\n )\n\n self.is_fitted = True\n # The prior P(C) gets set to multinomial with p as the\n # proportion of observations in each class C\n self.prior = stats.multinomial(\n n=len(y), p=[np.sum(y == val) / len(y) for val in sorted(set(y))]\n )", "def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)", "def fit_normal(distogram):\n L = distogram.shape[1]\n params = torch.empty((3, L, L))\n \n for i in range(L):\n for j in range(L):\n m, s = calc_moments(distogram[:, i, j])\n scalar = torch.max(distogram[:, i, j]) / normal_distr(m, m, s)\n params[0, i, j], params[1, i, j], params[2, i, j] = m, s, scalar\n \n return params", "def create_gaussian_data(self, 
mean, std, nPoints, nClusters, nDimension):\n dataset = np.zeros((nClusters, nPoints, nDimension), dtype=float)\n for i in range(nClusters):\n cov = std[i] ** 2\n dataset[i, :, :] = np.random.multivariate_normal(mean[i], cov, nPoints)\n\n return dataset", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def generate_distribution(self, df, var, week):\n\n # Get descriptive statistics\n mean = df[var][df['week'] == week].mean()\n std = df[var][df['week'] == week].std()\n low = df[var][df['week'] == week].min()\n high = df[var][df['week'] == week].max()\n\n # print(df[var][df['week'] == week].plot.density())\n\n # TEMPORARY: USE NORMAL DISTRIBUTION\n g = gaussian_kde(df[var][df['week'] == week])\n X = get_truncated_normal(mean=mean, sd=std, low=low, upp=high)\n # print([g(x)[0] for x in np.linspace(-2,8,10)])\n # print(list(df[var][df['week'] == week]))\n # print(X.rvs(10))\n\n return X", "def test_fit_numpy_array(self):\n # Setup\n copula = GaussianMultivariate(\n distribution='copulas.univariate.gaussian.GaussianUnivariate')\n\n # Run\n copula.fit(self.data.to_numpy())\n\n # Check\n for key, (column, univariate) in enumerate(zip(self.data.columns, copula.univariates)):\n assert univariate._params['loc'] == np.mean(self.data[column])\n assert univariate._params['scale'] == np.std(self.data[column])\n\n expected_covariance = copula._get_covariance(pd.DataFrame(self.data.to_numpy()))\n assert (copula.covariance == expected_covariance).all().all()", "def predictionDistribution(x,beta,sigma2,mu,Cov,x_train,z_train):\r\n ### TODO: Write your code here\r\n X = []\r\n for i in x:\r\n j = [1,i]\r\n X.append(j)\r\n X = np.array(X)\r\n print(\"X.shape\", X.shape)\r\n print(\"mu.sape\", mu.shape)\r\n print(\"Cov.shape \",Cov.shape)\r\n mu_new = np.matmul(X , mu)\r\n cov_new = sigma2 + np.matmul( X, np.matmul( Cov,X.T ) )\r\n var = np.sqrt(cov_new.diagonal())\r\n \r\n plt.figure(1)\r\n plt.xlabel(\"X VALUES\")\r\n plt.ylabel(\"Z VALUES\")\r\n plt.xlim(-4,4)\r\n plt.ylim(-4,4)\r\n plt.title(\"Prediction with \" + str(len(x_train)) + \" samples\")\r\n plt.errorbar(x , mu_new , var , label='predicted values')\r\n plt.scatter(x_train , z_train, color='r', label=\"Samples\")\r\n plt.legend()\r\n plt.show()\r\n\r\n return", "def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()", "def fit(self, data, 
y=None):\n if isinstance(data, pd.DataFrame):\n data = data.to_numpy()\n self.mean = np.mean(data, axis=0)\n self.std = np.std(data, axis=0)", "def multivariate_log_normal_fn(mean, covariance, n, names_column='param', *args, **kwargs):\n\n # clip zeros to prevent breaking log-norm function\n atol = 0.01*covariance[covariance > 0].min().min()\n atol_m = 0.01*mean[mean.value > 0]['value'].min()\n\n _mean = mean.set_index(names_column).clip(lower=atol_m).astype(float, errors='ignore')\n _cov = covariance[_mean.index].reindex(_mean.index)\n\n _cov_diagonal = pd.DataFrame([np.clip(np.diag(_cov), atol, np.inf)], columns=_mean.index)\n for cov_i in _mean.index:\n _cov.at[cov_i, cov_i] = _cov_diagonal[cov_i]\n\n mean_t_mean = np.product(np.meshgrid(_mean.values, _mean.values), axis=0)\n\n mu = np.log(_mean.values[:, 0]) - 0.5 * np.log(1 + np.diag(_cov.values) / np.diag(mean_t_mean))\n # mu = np.log(_mean.values[:, 0])/(np.sqrt(np.diag(_cov.values) / (np.diag(mean_t_mean))+1))\n\n cov = np.log((_cov / mean_t_mean) + 1)\n\n return pd.DataFrame(np.exp(np.random.multivariate_normal(mu, cov, n)), columns=_mean.index.values)\n # return pd.DataFrame(np.random.multivariate_normal(_mean.values[:,0], _cov, n), columns=_mean.index.values)", "def fit(self, X):", "def predictionDistribution(x,beta,sigma2,mu,Cov,x_train,z_train):\r\n ### TODO: Write your code here\r\n X = np.array(x)\r\n X = X[:, np.newaxis]\r\n extra_col = np.ones((X.shape[0], 1))\r\n X = np.append(extra_col, X, axis = 1)\r\n\r\n z_new = []\r\n error = []\r\n\r\n for x_new in X:\r\n z_new.append((x_new@mu).item())\r\n error.append(np.sqrt(abs(((np.transpose(x_new))@Cov@x_new).item() + sigma2)))\r\n\r\n plt.figure(1)\r\n plt.title(\"Predicted and Training Data\")\r\n plt.xlabel('x')\r\n plt.ylabel('y')\r\n plt.axis([-4, 4, -4, 4])\r\n plt.plot(np.array(x), z_new, 'bo')\r\n plt.errorbar(np.array(x), z_new, yerr=error, c = 'b', ecolor='k')\r\n plt.scatter(x_train, z_train,c='r')\r\n plt.legend(['Predictions', 'Training Data'])\r\n plt.show()\r\n \r\n return", "def create_multivariat(mean, cov, n,show):\n\tif n==1:\n\t\tx=np.random.default_rng().multivariate_normal(mean, cov)\n\telse:\n\t\tx=np.random.default_rng().multivariate_normal(mean, cov, n)\n\tif show:\n\t \tdf=pd.DataFrame({'x':x[:,0],'y':x[:,1]})\n\t \tsns.jointplot(data=df,x='x',y='y')\n\treturn x", "def gaussian_fit(self):\r\n\r\n self.df5 = pd.DataFrame(columns=['Slit Number', 'Centre', 'Centre_err', 'Sigma', 'Sigma_err', 'FWHM', 'FWHM_err', 'Height', 'Height_err'])\r\n QDot_slits = self.QDot_detection()\r\n\r\n if len(QDot_slits) > 0: \r\n self.plot_data = pd.DataFrame(columns=[f\"{QDot_slits[0]}\"], index=self.energies)\r\n else:\r\n self.plot_data = pd.DataFrame(index=self.energies)\r\n\r\n for slit_number in QDot_slits:\r\n sel = self.df4[f'{slit_number}']\r\n self.plot_data[f'{slit_number}'] = sel\r\n \r\n # Makes a good first guess for the fit values of the gaussian\r\n max_intensity = max(sel)\r\n central_energy = sel[sel==max_intensity].index.values\r\n central_energy = central_energy[0]\r\n\r\n # Fits a gaussian model to the selected data and shows the output\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(sel, x=self.energies, weights=1 / np.sqrt(sel), center = central_energy, amplitude = max_intensity, sigma = 1, nan_policy= 'omit')\r\n \r\n self.plot_data[f'{slit_number} best fit'] = fit.best_fit\r\n\r\n # Appends the fit data for the variables to a new dataframe and shows the fit results with errors\r\n fit_variables = [slit_number]\r\n for key in fit.params:\r\n 
if key in ['center', 'sigma', 'fwhm', 'height']:\r\n fit_variables.append(fit.params[key].value)\r\n fit_variables.append(fit.params[key].stderr)\r\n \r\n self.df5 = self.df5.append({'Slit Number': fit_variables[0], 'Centre': fit_variables[1], 'Centre_err': fit_variables[2], 'Sigma': fit_variables[3], 'Sigma_err': fit_variables[4], 'FWHM': fit_variables[5], 'FWHM_err': fit_variables[6], 'Height': fit_variables[7], 'Height_err': fit_variables[8]}, ignore_index=True)\r\n \r\n return self.plot_data, self.df5", "def test_sample(self, normal_mock):\n # Setup\n instance = GaussianMultivariate(GaussianUnivariate)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n normal_mock.return_value = np.array([\n [0.1, 0.1, 0.1],\n [0.2, 0.2, 0.2],\n [0.4, 0.4, 0.4],\n [0.6, 0.6, 0.6],\n [0.8, 0.8, 0.8]\n ])\n\n expected_result = pd.DataFrame([\n {'A': 22.678232998312527, 'B': 70.70710678118655, 'C': 284.35270009440734},\n {'A': 23.356465996625055, 'B': 71.41421356237309, 'C': 298.7054001888146},\n {'A': 24.712931993250110, 'B': 72.82842712474618, 'C': 327.4108003776293},\n {'A': 26.069397989875164, 'B': 74.24264068711929, 'C': 356.116200566444},\n {'A': 27.425863986500215, 'B': 75.65685424949238, 'C': 384.8216007552586}\n ])\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.equals(expected_result)\n\n assert normal_mock.called_once_with(\n np.zeros(instance.covariance.shape[0]),\n instance.covariance,\n 5\n )", "def _fit(self, df):\n return df", "def postfit_covariance(self) -> NONEARRAY:\n pass", "def gauss_fit(seld, data=''):\n mean, std = norm.fit(data)\n return mean, std", "def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])", "def Transform_Statistics(Covariance_Matrix, Corr_Matrix, Sigma, Means, NParams, dist, depend):\n\n if dist=='normal':\n return Covariance_Matrix, Corr_Matrix, Sigma, Means\n elif dist=='lognormal':\n return Transform_LogNormal(Covariance_Matrix, Corr_Matrix, Sigma, Means, NParams, depend)\n elif dist=='uniform':\n if depend=='corr':\n print \"Correlated sampling not support for uniform yet\"\n raise ValueError\n else:\n return Covariance_Matrix, Corr_Matrix, Sigma, Means\n else:\n print 'Distribution not recognised'", "def evaluate_fit(path_to_results, filename=\"results.csv\"):\n import pandas as pd\n\n results = pd.read_csv(path_to_results + \"/\" + filename)\n\n def plot(pivotted, variance):\n import seaborn as sns\n import os\n import numpy as np\n from matplotlib.colors import LogNorm\n import math\n\n barmin, barmax = 1e-18, 1e-8\n cbar_ticks = [1e-20, 1e-18, 1e-16, 1e-14, 1e-12, 1e-10]\n log_norm = LogNorm(vmin=barmin, vmax=barmax)\n ax = sns.heatmap(\n pivotted,\n cmap=\"coolwarm\",\n vmax=barmax,\n vmin=barmin,\n norm=log_norm,\n cbar_kws={\"ticks\": cbar_ticks},\n ) # , yticklabels=achsislabel_y, xticklabels=achsislabel_x)\n # ax.invert_yaxis()\n fig = ax.get_figure()\n if not os.path.exists(path_to_results + \"/heatmap_variance\"):\n os.mkdir(path_to_results + \"/heatmap_variance\")\n\n fig.savefig(\n path_to_results + \"/heatmap_variance\" + \"/\" + str(obs_loc) + \"_\" + variance,\n dpi=dpi,\n )\n fig.clf()\n\n from processing import identify_numbers_from_string\n\n for 
obs_loc in results[\"obs_loc\"].unique():\n # extract only rows with obs_loc==obs_loc\n df_obs_loc = results[results.obs_loc == obs_loc]\n # extract columns for plotting\n df_obs_loc_cut = df_obs_loc[[\"S_in\", \"T_in\", \"cov\"]]\n # get values for sigma S and sigma T seperately from column cov\n df_obs_loc_cut[\"cov_numbers\"] = df_obs_loc_cut[\"cov\"].apply(\n identify_numbers_from_string\n )\n df_obs_loc_cut[\"sigma_S\"] = df_obs_loc_cut[\"cov_numbers\"].apply(lambda x: x[0])\n df_obs_loc_cut[\"sigma_T\"] = df_obs_loc_cut[\"cov_numbers\"].apply(lambda x: x[3])\n # convert objects to floats\n df_obs_loc_cut.sigma_S = pd.to_numeric(df_obs_loc_cut.sigma_S)\n df_obs_loc_cut.sigma_T = pd.to_numeric(df_obs_loc_cut.sigma_T)\n for variance in [\"sigma_S\", \"sigma_T\"]:\n pivot_df_obs_loc_cut = df_obs_loc_cut.pivot(\"S_in\", \"T_in\", variance)\n # plot heatmap\n import numpy as np\n\n plot(pivot_df_obs_loc_cut, variance)", "def _4_normal_spread():\n return [mn(mean=np.array([10.0, 10.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]])),\n mn(mean=np.array([10.0, -10.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]])),\n mn(mean=np.array([-10.0, -10.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]])),\n mn(mean=np.array([-10.0, 10.0]),\n cov=np.array([[1.0, 0.0], [0.0, 1.0]]))]", "def __call__(self, shape):\n return np.random.normal(loc=self.mean, scale=self.stddev, size=shape)" ]
[ "0.6308276", "0.62867206", "0.62607855", "0.6049176", "0.5958321", "0.58459413", "0.58255345", "0.58052725", "0.57393396", "0.57337683", "0.5629323", "0.56162274", "0.5598536", "0.5563038", "0.55481696", "0.54957813", "0.54714674", "0.5460343", "0.5455187", "0.5437176", "0.54251486", "0.5414902", "0.54073226", "0.5398037", "0.53553444", "0.53468084", "0.5333447", "0.5330108", "0.5319954", "0.5319138" ]
0.66464084
0
On fit, the distributions for each column use instances of copula.distribution.
def test_fit_distribution_arg(self): # Setup distribution = 'copulas.univariate.gaussian_kde.GaussianKDE' copula = GaussianMultivariate(distribution=distribution) # Run copula.fit(self.data) # Check assert copula.distribution == 'copulas.univariate.gaussian_kde.GaussianKDE' for i, key in enumerate(self.data.columns): assert copula.columns[i] == key assert get_qualified_name(copula.univariates[i].__class__) == copula.distribution expected_covariance = copula._get_covariance(self.data) assert (copula.covariance == expected_covariance).all().all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fit_distribution_selector(self):\n copula = GaussianMultivariate(distribution={\n 'column1': 'copulas.univariate.beta.BetaUnivariate',\n 'column2': 'copulas.univariate.gaussian_kde.GaussianKDE',\n })\n copula.fit(self.data)\n\n assert get_qualified_name(\n copula.univariates[0].__class__) == 'copulas.univariate.beta.BetaUnivariate'\n assert get_qualified_name(\n copula.univariates[1].__class__) == 'copulas.univariate.gaussian_kde.GaussianKDE'\n assert get_qualified_name(\n copula.univariates[2].__class__) == 'copulas.univariate.base.Univariate'", "def test_fit_default_distribution(self):\n\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert copula.univariates[i].__class__ == GaussianUnivariate\n assert copula.univariates[i]._params['loc'] == self.data[key].mean()\n assert copula.univariates[i]._params['scale'] == np.std(self.data[key])\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()", "def fit(self, X: np.ndarray, y: np.ndarray):\n\n # For each feature column index in X\n for col_idx in range(X.shape[1]):\n if col_idx not in self.column_distribution_map:\n raise ValueError(f\"No distribution given for column {col_idx}\")\n\n # If the column has a multinomial tag, fit a multinomial.\n if self.column_distribution_map[col_idx] == \"multinomial\":\n self.fitted_distributions[col_idx] = self._fit_multinomial(\n X=X, col_idx=col_idx, y=y\n )\n # Otherwise fit a Gaussian\n elif self.column_distribution_map[col_idx] == \"gaussian\":\n self.fitted_distributions[col_idx] = self._fit_gaussian(\n X=X, col_idx=col_idx, y=y\n )\n\n self.is_fitted = True\n # The prior P(C) gets set to multinomial with p as the\n # proportion of observations in each class C\n self.prior = stats.multinomial(\n n=len(y), p=[np.sum(y == val) / len(y) for val in sorted(set(y))]\n )", "def fit_multidistribution_to_hours(self, column_name, day_ahead, hours,\n marginal_class, copula_class,\n criterion=None, marginal_options=None,\n copula_options=None):\n self.check_for_column(column_name)\n if marginal_options is None:\n marginal_options = {}\n if copula_options is None:\n copula_options = {}\n\n hourly_sources = source.split_source_at_hours(hours_in_range)\n hourly_windows = {hour: source.rolling_window(day_ahead)\n for hour, source in hourly_sources.items()}\n\n #This segments the data and fits a univariate distribution to the\n #segmented data.\n segmented_windows = {}\n marginals = {}\n forecasts_hour = {}\n for hour, window in hourly_windows.items():\n curr_dt = day_ahead + datetime.timedelta(hours=hour)\n\n # If criterion is not passed in, we do no segmentation\n if criterion is not None:\n segmented_windows[hour] = window.segment(curr_dt, criterion)\n else:\n segmented_windows[hour] = window\n series = window.get_column(column_name).tolist()\n distr = marginal_class.fit(series, **marginal_options)\n marginals[hour] = distr\n\n #To fit a copula to the data we need all data, not only the seperated one.\n #We have to transform it to [0,1]^n for the purposes of fitting a copula.\n hourly_df = source.get_column_at_hours(column_name, hours_in_range)\n transformed_series = {}\n for hour in hours_in_range:\n hourly_df[hour] = hourly_df[hour] + forecasts_hour[hour]\n transformed = [marginals[hour].cdf(x) for x in hourly_df[hour]]\n transformed_series[hour] = transformed\n\n #First fitting a copula to the transformed data and then 
computing a\n #multivariate distribution using the copula and the marginals.\n fitted_copula = copula_class.fit(transformed_series, hours,\n **copula_options)\n f = copula.CopulaWithMarginals(fitted_copula, marginals, hours)", "def initializeDistribution(self):\n self.raiseAMessage('initialize distribution')\n mu = distribution1D.vectord_cxx(len(self.mu))\n for i in range(len(self.mu)):\n mu[i] = self.mu[i]\n covariance = distribution1D.vectord_cxx(len(self.covariance))\n for i in range(len(self.covariance)):\n covariance[i] = self.covariance[i]\n if self.method == 'spline':\n if self.covarianceType != 'abs':\n self.raiseAnError(IOError,'covariance with type ' + self.covariance + ' is not implemented for ' + self.method + ' method')\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu)\n elif self.method == 'pca':\n self._distribution = distribution1D.BasicMultivariateNormal(covariance, mu, str(self.covarianceType), self.rank)\n if self.transformation:\n self.lowerBound = [-sys.float_info.max]*self.rank\n self.upperBound = [sys.float_info.max]*self.rank\n else:\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimension)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimension)]", "def test_cumulative_distribution_fit_call_pd(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def test_cumulative_distribution_fit_df_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def test_fit_numpy_array(self):\n # Setup\n copula = GaussianMultivariate(\n distribution='copulas.univariate.gaussian.GaussianUnivariate')\n\n # Run\n copula.fit(self.data.to_numpy())\n\n # Check\n for key, (column, univariate) in enumerate(zip(self.data.columns, copula.univariates)):\n assert univariate._params['loc'] == np.mean(self.data[column])\n assert univariate._params['scale'] == np.std(self.data[column])\n\n expected_covariance = copula._get_covariance(pd.DataFrame(self.data.to_numpy()))\n assert (copula.covariance == expected_covariance).all().all()", "def doCols(col):\n p = []\n for clf in clfs:\n # print 'trainPreprocessed:', trainPreprocessed, trainPreprocessed.shape\n # print 'labels_train[:, col]', labels_train[:, col], labels_train[:, col].shape\n clf.fit(trainPreprocessed, labels_train[:, col])\n p.append(clf.predict_proba(testPreprocessed)[:, 1])\n return p", "def fit_mle(data, copula, marginals, opti_method='SLSQP', known_parameters=False):\n\n if copula.type == \"mixture\":\n print(\"estimation of mixture only available with CMLE try fit mle\")\n raise error\n \n if known_parameters == True:\n\n marg_cdf1 = lambda i : marginals[0][\"distribution\"].cdf(data[0][i], marginals[0][\"loc\"], marginals[0][\"scale\"]) \n marg_pdf1 = lambda i : marginals[0][\"distribution\"].pdf(data[0][i], marginals[0][\"loc\"], marginals[0][\"scale\"])\n\n marg_cdf2 = lambda i : marginals[1][\"distribution\"].cdf(data[1][i], marginals[1][\"loc\"], marginals[1][\"scale\"]) \n marg_pdf2 = lambda i : marginals[1][\"distribution\"].pdf(data[1][i], 
marginals[1][\"loc\"], marginals[1][\"scale\"]) \n\n logi = lambda i, theta: np.log(copula.get_pdf(marg_cdf1(i),marg_cdf2(i),[theta]))+np.log(marg_pdf1(i)) +np.log(marg_pdf2(i))\n log_likelihood = lambda theta: -sum([logi(i, theta) for i in range(0,len(data[0]))])\n\n results = minimize(log_likelihood, copula.parameters_start, method=opti_method, )# options={'maxiter': 300})#.x[0]\n\n else:\n marg_cdf1 = lambda i, loc, scale : marginals[0][\"distribution\"].cdf(data[0][i], loc, scale) \n marg_pdf1 = lambda i, loc, scale : marginals[0][\"distribution\"].pdf(data[0][i], loc, scale)\n\n marg_cdf2 = lambda i, loc, scale : marginals[1][\"distribution\"].cdf(data[1][i], loc, scale) \n marg_pdf2 = lambda i, loc, scale : marginals[1][\"distribution\"].pdf(data[1][i], loc, scale) \n\n logi = lambda i, theta, loc1, scale1, loc2, scale2: \\\n np.log(copula.get_pdf(marg_cdf1(i, loc1, scale1),marg_cdf2(i, loc2, scale2),[theta])) \\\n + np.log(marg_pdf1(i, loc1, scale1)) +np.log(marg_pdf2(i, loc2, scale2))\n \n def log_likelihood(params):\n theta, loc1, scale1, loc2, scale2 = params\n return -sum([logi(i, theta, loc1, scale1, loc2, scale2) for i in range(0,len(data[0]))])\n\n results = minimize(log_likelihood, (copula.parameters_start, np.array(0), np.array(1), np.array(0), np.array(1)), method=opti_method, )# options={'maxiter': 300})#.x[0]\n\n print(\"method:\", opti_method, \"- success:\", results.success, \":\", results.message)\n if results.success == True:\n return results.x\n\n print(\"Optimization failed\")\n return None", "def fit_scalers(self, df: pd.DataFrame) -> None:\n for feature, scaler in self._scalers.items():\n if feature == \"season\":\n scaler.fit(df[\"season\"].unique().reshape(-1, 1))\n elif feature in FEATURES_TO_SCALE:\n values = np.concatenate((df[f\"home_{feature}\"].values, df[f\"away_{feature}\"].values))\n scaler.fit(np.unique(values).reshape(-1, 1))\n else:\n scaler.fit(df[feature].unique().reshape(-1, 1))", "def inspect_distribution(self, column, y=None):\n check_is_fitted(self)\n\n if y is None:\n ys = self.classes_\n else:\n ys = [y]\n\n distributions = {}\n for yi in ys:\n y_index = self.class_map_[yi]\n variable_index = self.column_map_[column]\n dist_i = self.model_.distributions[y_index].distributions[variable_index]\n distributions[yi] = dist_i\n\n if len(distributions) == 1:\n return distributions[y]\n return distributions", "def fit(self, dataset, _=None):\n print(f\"Analyzing {len(dataset.columns)} columns for deficiencies...\")\n duplicates = dataset.columns[dataset.T.duplicated()]\n singles = dataset.columns[dataset.nunique() == 1]\n self.deficient = set().union(duplicates).union(singles)\n return self", "def __calculate_cdf(self):\n \n for (f, dist) in self.__dists.iteritems():\n for dict in self.__queries:\n dict['rss_lower_cdf']=(dict[qs.QRY_LRSS]/self.__db_size)\n dict['rss_upper_cdf']=(dict[qs.QRY_URSS]/self.__db_size)", "def initializeDistribution(self):\n if self.functionType == 'CDF':\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,True)\n else:\n self._distribution = distribution1D.BasicMultiDimensionalInverseWeight(str(self.dataFilename), self.p,False)\n self.dimensionality = self._distribution.returnDimensionality()\n self.lowerBound = [self.returnLowerBound(dim) for dim in range(self.dimensionality)]\n self.upperBound = [self.returnUpperBound(dim) for dim in range(self.dimensionality)]", "def initializeDistribution(self):\n\n f = open(self.dataFilename, 'r')\n reader = csv.reader(f)\n headers = 
next(reader)\n indexFunctionID = headers.index(self.functionID)\n indexVariableID = headers.index(self.variableID)\n f.close()\n rawData = np.genfromtxt(self.dataFilename, delimiter=\",\" , skip_header=1, usecols=(indexVariableID,indexFunctionID))\n\n self.data = rawData[rawData[:,0].argsort()]\n self.lowerBound = self.data[0,0]\n self.upperBound = self.data[-1,0]\n\n if self.functionType == 'cdf':\n self.cdfFunc = UnivariateSpline(self.data[:,0], self.data[:,1], k=self.k, s=self.s)\n self.pdfFunc = self.cdfFunc.derivative()\n self.invCDF = UnivariateSpline(self.data[:,1], self.data[:,0], k=self.k, s=self.s)\n else:\n self.pdfFunc = UnivariateSpline(self.data[:,0], self.data[:,1], k=self.k, s=self.s)\n cdfValues = np.zeros(self.data[:,0].size)\n for i in range(self.data[:,0].size):\n cdfValues[i] = self.pdfFunc.integral(self.data[0][0],self.data[i,0])\n self.invCDF = UnivariateSpline(cdfValues, self.data[:,0] , k=self.k, s=self.s)\n\n # Note that self.invCDF is creating a new spline where I switch its term.\n # Instead of doing spline(x,f(x)) I am creating its inverse spline(f(x),x)\n # This can be done if f(x) is monothonic increasing with x (which is true for cdf)", "def d(self, df):\n # Get variable names\n var = [key for key, _ in self.marginals.items()]\n df_u = self.sample2pr(df)[var]\n # Evaluate copula density\n l_copula = self.copula.d(df_u.values)\n # Evaluate marginal densities\n L_marginals = zeros((df.shape[0], len(var)))\n for i, v in enumerate(var):\n L_marginals[:, i] = self.marginals[v].d(df[v])\n l_marginals = prod(L_marginals, axis=1)\n\n return l_copula * l_marginals", "def fit_dist(self, instances):\n dists = []\n for i in range(len(instances[0])):\n component = [instances[k][i] for k in instances.keys()]\n dist = norm.fit(component)\n dists.append(dist)\n\n def sample():\n instance = []\n for d in dists:\n instance.append(np.random.normal(d[0], d[1]))\n return instance\n\n return sample", "def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])", "def __init__(\n self,\n column_distribution_map: dict,\n alpha: float = 1,\n binomial: bool = False,\n verbose: bool = True,\n ):\n self.binomial = binomial\n self.column_distribution_map = column_distribution_map\n self.fitted_distributions = {}\n self.is_fitted = False\n self.alpha = alpha\n self.verbose = verbose", "def marginalize(self, axis):\n \n dist = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE\n #\n \n # get relevant data based on the given random variable\n for i in self._table:\n if axis == 0:\n if i[0] in dist:\n dist[i[0]] += self._table[i]\n else:\n dist[i[0]] = self._table[i]\n else:\n if i[1] in dist:\n dist[i[1]] += self._table[i]\n else:\n dist[i[1]] = self._table[i]\n\n\n #\n # END OF YOUR CODE\n # ------------------------------------------------------------------------- \n\n return dist", "def fit(self, X: pd.DataFrame, y: Optional[pd.Series] = None):\n\n # check input dataframe\n X = super().fit(X)\n\n self.lambda_dict_ = {}\n\n for var in self.variables_:\n _, self.lambda_dict_[var] = stats.boxcox(X[var])\n\n return self", "def compare_parametrical_distribution(df, col, nbins=None, par_distr = stats.gamma):\n sns.distplot(df[col], kde=False, bins=nbins, fit=stats.gamma)", "def fit_distribution_to_column(self, column_name, 
distr_class,\n **distr_options):\n self.check_for_column(column_name)\n values = self.get_column(column_name).tolist()\n return distr_class.fit(values, **distr_options)", "def cdf(X, parameters):\n check_data_type_column_data(X)\n check_model_params_dict(parameters)\n\n sigma = (1.0/parameters['rho'])**.5\n\n return norm.cdf(X,parameters['mu'],sigma)", "def test_cumulative_distribution_fit_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def get_distrib_of_correlations(num_columns, percent_indep,\n distrib=\"uniform\", indep_slack=0.05,\n percent_anti_dep=0.5, num_bins=10,\n random_seed=RANDOM_SEED):\n np_random = np.random.RandomState(random_seed)\n\n num_corrs = int(round((num_columns**2 - num_columns) / 2.0))\n num_indep_corrs = int(round(num_corrs * percent_indep))\n num_dep_corrs = num_corrs - num_indep_corrs\n indep_corrs = []\n dep_corrs = []\n anti_dep_corrs = []\n\n # 1. if uniform, use uniform distrib to fill in desired dep strs\n if distrib == \"uniform\":\n # 1a. add independent scores\n while len(indep_corrs) < num_indep_corrs:\n score = np_random.uniform(0.0 - indep_slack, 0.0 + indep_slack + 0.000001)\n if score >= 0.0 - indep_slack and score <= 0.0 + indep_slack:\n indep_corrs.append(score)\n\n # 1b. add anti-dependent scores\n anti_dep_corrs += list(np_random.uniform(\n -1.0,\n 0.0 - indep_slack,\n size=int(math.floor(num_dep_corrs * percent_anti_dep))\n ))\n\n # 1c. add dependent scores\n while len(dep_corrs) < int(math.ceil(num_dep_corrs * (1.0 - percent_anti_dep))):\n score = np_random.uniform(0.0 + indep_slack, 1.000001)\n if score > 0.0 + indep_slack and score <= 1.0:\n dep_corrs.append(score)\n\n # 2. if normal, use normal distrib to fill in desired dep strs\n elif distrib == \"normal\":\n # 2a. add independent scores\n while len(indep_corrs) < num_indep_corrs:\n scale = indep_slack / 4.0\n score = np_random.normal(loc=0.0, scale=scale)\n if score >= 0.0 - indep_slack and score <= 0.0 + indep_slack:\n indep_corrs.append(score)\n\n # 2b. add anti-dependent scores\n while len(anti_dep_corrs) < int(math.floor(num_dep_corrs * percent_anti_dep)):\n loc = (-1.0 - indep_slack) / 2.0\n scale = abs(loc / 4.0)\n score = np_random.normal(loc=loc, scale=scale)\n if score >= -1.0 and score < 0.0 - indep_slack:\n anti_dep_corrs.append(score)\n\n # 2c. add dependent scores\n while len(dep_corrs) < int(math.ceil(num_dep_corrs * (1.0 - percent_anti_dep))):\n loc = (1.0 + indep_slack) / 2.0\n scale = loc / 4.0\n score = np_random.normal(loc=loc, scale=scale)\n if score > 0.0 + indep_slack and score <= 1.0:\n dep_corrs.append(score)\n else:\n raise Exception(\"Invalid distribution specified.\")\n\n # 3. 
return corrs\n corrs = anti_dep_corrs + indep_corrs + dep_corrs\n print \"num_columns={}, num_corrs={}, num_dep_corrs={}, num_indep_corrs={}, len(scores)={}\"\\\n .format(num_columns, num_corrs, num_dep_corrs, num_indep_corrs, len(corrs))\n return corrs", "def analyze_distributions(self, data_frame: pd.DataFrame):\n logging.info(f\"ModelContainer {self.name}: analyze_distributions\")\n\n self.feature_uniques = get_unique_feature_values(\n data_frame, self.features_categorical\n )\n self.feature_summaries = describe_features(data_frame, self.features_numeric)\n\n return self", "def _fit(self, dataset):\n self.dataset = dataset\n self.masker = self.masker or dataset.masker\n self.null_distributions_ = {}\n\n ma_values = self._collect_ma_maps(\n coords_key=\"coordinates\",\n maps_key=\"ma_maps\",\n fname_idx=0,\n )\n\n # Determine bins for null distribution histogram\n max_ma_values = np.max(ma_values, axis=1)\n max_poss_ale = self._compute_summarystat(max_ma_values)\n self.null_distributions_[\"histogram_bins\"] = np.round(\n np.arange(0, max_poss_ale + 0.001, 0.0001), 4\n )\n\n stat_values = self._compute_summarystat(ma_values)\n\n iter_df = self.inputs_[\"coordinates\"].copy()\n rand_idx = np.random.choice(self.xyz.shape[0], size=(iter_df.shape[0], self.n_iters))\n rand_xyz = self.xyz[rand_idx, :]\n iter_xyzs = np.split(rand_xyz, rand_xyz.shape[1], axis=1)\n\n # Define parameters\n iter_dfs = [iter_df] * self.n_iters\n params = zip(iter_dfs, iter_xyzs)\n\n if self.n_cores == 1:\n if self.memory_limit:\n perm_scale_values = np.memmap(\n self.memmap_filenames[1],\n dtype=stat_values.dtype,\n mode=\"w+\",\n shape=(self.n_iters, stat_values.shape[0]),\n )\n else:\n perm_scale_values = np.zeros(\n (self.n_iters, stat_values.shape[0]), dtype=stat_values.dtype\n )\n for i_iter, pp in enumerate(tqdm(params, total=self.n_iters)):\n perm_scale_values[i_iter, :] = self._run_permutation(pp)\n if self.memory_limit:\n # Write changes to disk\n perm_scale_values.flush()\n else:\n with mp.Pool(self.n_cores) as p:\n perm_scale_values = list(\n tqdm(p.imap(self._run_permutation, params), total=self.n_iters)\n )\n perm_scale_values = np.stack(perm_scale_values)\n\n p_values, z_values = self._scale_to_p(stat_values, perm_scale_values)\n\n del perm_scale_values\n\n logp_values = -np.log10(p_values)\n logp_values[np.isinf(logp_values)] = -np.log10(np.finfo(float).eps)\n\n # Write out unthresholded value images\n images = {\"stat\": stat_values, \"logp\": logp_values, \"z\": z_values}\n return images", "def fit ( self, X ):\n \n if self.mean:\n self.df_means = X.mean ( axis = 0 ) # Get the colwise means\n if self.std:\n self.df_std = X.std ( axis = 0 ) # Get the colwise stds" ]
[ "0.6521201", "0.62778795", "0.57117486", "0.54257816", "0.5407162", "0.5365817", "0.5301138", "0.52958846", "0.5239649", "0.5227557", "0.5182711", "0.51733905", "0.51689273", "0.5161067", "0.51581097", "0.51333064", "0.51219803", "0.5097027", "0.5093943", "0.5085199", "0.5080811", "0.50743794", "0.50741696", "0.5069979", "0.5060999", "0.50332606", "0.49955314", "0.4983297", "0.4966522", "0.49539357" ]
0.63467205
1
Probability_density computes probability for the given values.
def test_probability_density(self): # Setup copula = GaussianMultivariate(GaussianUnivariate) copula.fit(self.data) X = np.array([2000., 200., 0.]) expected_result = 0.032245296420409846 # Run result = copula.probability_density(X) # Check assert expected_result - 1e-16 < result < expected_result + 1e-16
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def probability_density(self, X):\n raise NotImplementedError", "def probability_density(dic):\n\n var = dic['var']\n par = dic['par']\n y1 = dic['y']\n y = y1.conjugate() * y\n return dic_result(var,par,y)", "def prob_density_func(xs,norm=True,data_range='data'):\n if data_range=='data':\n dist_keys = set(xs)\n elif data_range=='ext_data':\n dist_keys = range(min(xs),max(xs)+1)\n else:\n dist_keys = data_range\n \n pdf = dict([(k,0.0) for k in dist_keys])\n for x in xs:\n pdf[x] += 1.0\n if norm:\n pdf.update([(k,pdf[k]/sum(pdf.values())) for k in pdf.keys()])\n return pdf", "def gc_prob_density(r):\n return np.exp(_interp_ln_dens(r))", "def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability", "def density(x, y, pmap, amplitude=True):\n\n d = pmap['density']\n params = [pmap['x_mean'], pmap['y_mean'], pmap['sigma']]\n\n if d == 'gaussian':\n pdf = gaussian(x, y, params)\n \n elif d == 'students':\n pdf = student(x, y, pmap['nu'], params)\n \n if amplitude:\n pdf = pmap['amplitude'] * pdf\n\n return pdf", "def probability(distances):\n v = [1.0/(d + 1) for d in distances]\n s = sum(v)\n return [i/s for i in v]", "def define_pdf(self, values: torch.Tensor, weights: torch.Tensor, inds: torch.Tensor) -> Distribution:\n\n raise NotImplementedError()", "def find_density(attr, D, h):\n d = D.shape[1]\n n = D.shape[0]\n total = 0\n for xi in D:\n kernel = find_kernel_value(attr, xi, h, d)\n total += kernel\n return total / (n * h ** d)", "def _estimate_density(self, x):\n\n self.density_, self.bins_ = np.histogram(x, bins=10, density=True)", "def density(self, arg):\n out = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n scale = std * self.data['maturity']**.5\n loc = ((mean - self.data['riskfree']) *\n self.data['maturity'] - scale**2)\n out += weight * scs.norm(loc, scale).pdf(arg)\n return out", "def _calc_density(x: np.ndarray, y: np.ndarray):\n from scipy.stats import gaussian_kde\n\n # Calculate the point density\n xy = np.vstack([x, y])\n z = gaussian_kde(xy)(xy)\n\n min_z = np.min(z)\n max_z = np.max(z)\n\n # Scale between 0 and 1\n scaled_z = (z - min_z) / (max_z - min_z)\n\n return scaled_z", "def one_body_density(self, positions):\n\n num_radii = 41\n density = np.zeros(num_radii)\n r_vec = np.linspace(0, 4, num_radii)\n step = r_vec[1] - r_vec[0]\n\n # Calculate the distance from origo of each particle\n radii = np.zeros(self.num_p)\n for i in range(self.num_p):\n r = 0\n for j in range(self.num_d):\n r += positions[i, j]*positions[i, j]\n radii[i] = math.sqrt(r)\n\n # Check in which segment each particle is in\n for i in range(self.num_p):\n dr = 0.0\n for j in range(num_radii):\n if(dr <= radii[i] < dr+step):\n density[j] += 1\n break\n else:\n dr += step\n\n return density", "def density(self, arg):\n mean = - self.sigma**2 * self.data['maturity']\n std = self.sigma * self.data['maturity']**.5\n return scs.norm(mean, std).pdf(arg)", "def rate_density(x, a):\n return a * x", "def cum_density_func(xs,norm=True,rank=False,data_range='data',pdf=None):\n if pdf is None:\n pdf = prob_density_func(xs,False,data_range)\n pdfk = sorted(pdf.keys())\n pdfv = map(pdf.get,pdfk)\n if not rank:\n cdfv = np.cumsum(pdfv)\n if norm:\n cdfv = cdfv/np.sum(pdfv)\n else:\n 
cdfv = np.arange(1,len(pdfk)+1)\n if norm:\n cdfv = cdfv/float((len(pdfk)+1))\n return dict(zip(pdfk,cdfv))", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def pdf(self,x):\n coordinate = distribution1D.vectord_cxx(len(x))\n for i in range(len(x)):\n coordinate[i] = x[i]\n pdfValue = self._distribution.pdf(coordinate)\n return pdfValue", "def dirichlet_pdf(x, alpha):\n density = reduce(operator.mul, \n [x[i]**(alpha[i]-1.0) for i in range(len(alpha))])\n norm_top = gamma(np.sum(alpha))\n norm_bot = reduce(operator.mul, [gamma(a) for a in alpha])\n return (norm_top / norm_bot) * density", "def density(temp,pres):\n g_p = liq_g(0,1,temp,pres)\n dliq = g_p**(-1)\n return dliq", "def new_density(points):\n inshape = points.shape\n points = points.view(-1, 3)\n out = []\n for p in points:\n if torch.all(p > 0.5) or torch.all(p < -0.5):\n out.append(torch.tensor([[1.0]]))\n else:\n out.append(torch.tensor([[0.0]]))\n return torch.cat(out).view(*inshape[:-1], 1).to(device)", "def pdf(self, grid, dataSegment):\n return self.density(dataSegment[0], *grid)", "def cal_prob(density_df, adv_intcpt, adv_slope, dis_intcpt, dis_slope):\n\n def p_adv(density):\n prob = adv_intcpt+adv_slope*density\n return prob\n def p_dis(density):\n prob = dis_intcpt+(dis_slope)*density\n return prob\n density_df['PROB'] = density_df[['Density', 'st0']].apply(\n lambda x: p_adv(x['Density']) if x['st0']==0 else p_dis(x['Density']), axis=1)\n density_df['PROB'] = density_df['PROB'].apply(lambda x:max(x,0))\n return density_df", "def get_density(xs, ys, mu, sigma, DIMENSION=2):\n return np.array([[kde(np.array([x,y]), mu, sigma, DIMENSION) for x in xs] for y in ys])", "def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))", "def pdf(self,x):\n return self.categoricalDist.pdf(x)", "def _density(self):\n fraction = np.array([0.]+[m.value for m in self.fraction])\n # TODO: handle invalid fractions using penalty functions\n # S = sum(fraction)\n # scale = S/100 if S > 100 else 1\n # fraction[0] = 100 - S/scale\n # penalty = scale - 1\n fraction[0] = 100 - sum(fraction)\n if (fraction < 0).any():\n return NaN\n volume = self._volume(fraction)\n density = np.array([m.density() for m in [self.base]+self.material])\n return np.sum(volume*density)", "def compute_empirical_distribution(values):\n distribution = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n for value in values:\n if value not in distribution:\n distribution[value] = 1\n else:\n distribution[value] += 1\n \n total = len(values)\n for v in distribution.keys():\n distribution[v] /= total\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return distribution", "def custom_pdf(self, cum_probs, values):\n rnd_num = random()\n for p in range(len(cum_probs)):\n if rnd_num < cum_probs[p]:\n return values[p]", "def predictProbabilities(self,density ='Gaussian'):\n\t\ttestingProbs = 
pd.DataFrame(index=self.testing.index.values,\n\t\t\t\t\t\t\t\t\tcolumns=self.trainingMeans.index.values)\n\n\t\ttesting = self.testing.copy().drop(self.classLabel,1)\n\n\t\tdef calculateGaussian(x, mean, stdev):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Gaussian distribution\n\t\t\t\"\"\"\n\t\t\texponent = math.exp(-(math.pow(x-mean,2)/(2*math.pow(stdev,2))))\n\t\t\tvalue= (1 / (math.sqrt(2*math.pi) * stdev)) * exponent\n\t\t\tif value==0:\n\t\t\t\treturn np.nan\n\t\t\telse:\n\t\t\t\treturn math.log(value)\n\n\t\tdef calculateBernoulli(x, mean, stdev):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Bernoulli distribution\n\t\t\t\"\"\"\n\t\t\tif x:\n\t\t\t\tprob = mean\n\t\t\telse:\n\t\t\t\tprob = 1-mean\n\t\t\treturn prob\n\n\t\tdef calculateMultinoulli(x, *series):\n\t\t\t\"\"\"\n\t\t\tReturns the density value of a Multinoulli distribution\n\t\t\t\"\"\"\n\t\t\tseries= series[0]\n\t\t\treturn series.ix[x]/float(series.sum())\n\n\t\tif density=='Multinoulli':\n\t\t\t#Redefine the parameters to be conditional means\n\t\t\tfor each in self.params.columns:\n\t\t\t\tfor el in self.params.index:\n\t\t\t\t\tmultiDF = pd.Series(index=self.data[each].unique())\n\t\t\t\t\tcounts = self.training[self.training[self.classLabel]==el][each].value_counts()\n\t\t\t\t\tself.params.ix[el][each] = (pd.concat([multiDF,counts],1).drop(0,1),)\n\t\t\tpdf = calculateMultinoulli\n\t\telif density == 'Bernoulli':\n\t\t\tpdf =calculateBernoulli\n\t\telse:\n\t\t\tpdf = calculateGaussian\n\n\t\tprint \"Note: Assuming features follow a \"+density+\" distribution\"\n\n\t\tfor el in testingProbs.columns:\n\t\t\t#Retrieve parameters of distribution\n\t\t\tparameters = self.params.ix[el]\n\t\t\tprobabilities = self.testing.copy().drop(self.classLabel,1)\n\n\t\t\t#For each feature, compute the likelihood of class being el\n\t\t\tfor each in probabilities.columns:\n\t\t\t\t#Skip features with 0 standard deviation\n\t\t\t\tif each in self.useless_features:\n\t\t\t\t\tcontinue\n\t\t\t\tprobabilities[each] = probabilities[each].apply(lambda x: pdf(x,*parameters[each]))\n\n\t\t\t#Multiply features together with prior\n\t\t\ttestingProbs[el] = math.log(self.priors.ix[el])+probabilities.sum(1)\n\t\t\t#testingProbs[el] = self.priors.ix[el]*probabilities.prod(1)\n\t\t#Use log-sum-exp trick. We need the offsetting factor as max among classLabels\n\t\tB = testingProbs.max(1)\n\t\t#Compute log_sum = log(\\sigma_c' exp(b_c' - B)) + B\n\t\tlog_sum = testingProbs.apply(lambda t: (t-B)).applymap(lambda u: math.exp(u)).sum(1).apply(math.log)+B\n\t\tself.testingProbs = testingProbs.apply(lambda x: x-log_sum)\n\t\t#self.testingProbs = testingProbs" ]
[ "0.7904561", "0.73715144", "0.72693807", "0.695215", "0.6619722", "0.6580916", "0.6514408", "0.64730096", "0.6420018", "0.64060086", "0.637983", "0.6354253", "0.628815", "0.6220043", "0.6208732", "0.6163211", "0.6121466", "0.6121466", "0.6105896", "0.61026055", "0.6094485", "0.6073716", "0.60729355", "0.60403407", "0.60387903", "0.6031136", "0.60179216", "0.5979941", "0.59723604", "0.59453964" ]
0.77205503
1
Gaussian copula can sample after being fit with a constant column. This process will raise warnings when computing the covariance matrix
def test_sample_constant_column(self): # Setup instance = GaussianMultivariate() X = np.array([ [1.0, 2.0], [1.0, 3.0], [1.0, 4.0], [1.0, 5.0] ]) instance.fit(X) # Run result = instance.sample(5) # Check assert result.shape == (5, 2) results = result[~result.isna()].all() assert results.all() assert result.loc[:, 0].equals(pd.Series([1.0, 1.0, 1.0, 1.0, 1.0], name=0)) # This is to check that the samples on the non constant column are not constant too. assert len(result.loc[:, 1].unique()) > 1 covariance = instance.covariance assert (~pd.isna(covariance)).all().all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_covariance(self):\n raise RuntimeError(\"Internal cosmosis error in SingleValueGaussianLikelihood\")", "def extract_covariance(self, block):\n raise RuntimeError(\"You need to implement the method \"\n \"'extract_covariance' if you set constant_covariance=False \"\n \"in a gaussian likelihood\")", "def test_fit_default_distribution(self):\n\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert copula.univariates[i].__class__ == GaussianUnivariate\n assert copula.univariates[i]._params['loc'] == self.data[key].mean()\n assert copula.univariates[i]._params['scale'] == np.std(self.data[key])\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()", "def test_fit_numpy_array(self):\n # Setup\n copula = GaussianMultivariate(\n distribution='copulas.univariate.gaussian.GaussianUnivariate')\n\n # Run\n copula.fit(self.data.to_numpy())\n\n # Check\n for key, (column, univariate) in enumerate(zip(self.data.columns, copula.univariates)):\n assert univariate._params['loc'] == np.mean(self.data[column])\n assert univariate._params['scale'] == np.std(self.data[column])\n\n expected_covariance = copula._get_covariance(pd.DataFrame(self.data.to_numpy()))\n assert (copula.covariance == expected_covariance).all().all()", "def test__get_covariance(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n\n expected_covariance = np.array([\n [1., -0.01261819, -0.19821644],\n [-0.01261819, 1., -0.16896087],\n [-0.19821644, -0.16896087, 1.]\n ])\n\n # Run\n covariance = copula._get_covariance(self.data)\n\n # Check\n assert np.isclose(covariance, expected_covariance).all().all()", "def build_covariance(self):\n raise RuntimeError(\"Your Gaussian covariance code needs to \"\n \"over-ride the build_covariance method so it knows how to \"\n \"load the data covariance (or set constant_covariance=False and \"\n \"over-ride the extract_covariance method)\")\n\n #using info in self.options,\n #like filenames etc,\n #build covariance", "def autocov(x, **kwargs):\r\n # only remove the mean once, if needed\r\n debias = kwargs.pop('debias', True)\r\n axis = kwargs.get('axis', -1)\r\n if debias:\r\n x = remove_bias(x, axis)\r\n kwargs['debias'] = False\r\n return crosscov(x, x, **kwargs)", "def autocov(x, **kwargs):\n\t# only remove the mean once, if needed\n\tdebias = kwargs.pop('debias', True)\n\taxis = kwargs.get('axis', -1)\n\tif debias:\n\t\tx = _remove_bias(x, axis)\n\tkwargs[ 'debias' ] = False\n\treturn crosscov(x, x, **kwargs)", "def test_fit_distribution_arg(self):\n # Setup\n distribution = 'copulas.univariate.gaussian_kde.GaussianKDE'\n copula = GaussianMultivariate(distribution=distribution)\n\n # Run\n copula.fit(self.data)\n\n # Check\n assert copula.distribution == 'copulas.univariate.gaussian_kde.GaussianKDE'\n\n for i, key in enumerate(self.data.columns):\n assert copula.columns[i] == key\n assert get_qualified_name(copula.univariates[i].__class__) == copula.distribution\n\n expected_covariance = copula._get_covariance(self.data)\n assert (copula.covariance == expected_covariance).all().all()", "def gauss_sample(mean, covariance):\n\n return None", "def calculate_covariance(self, x):\n # tx = self.reshape_tensor2d(x)\n # Calcualte the covariance\n # tx_mean = K.mean(tx, axis=0)\n # return tx_mean\n # tx_normal = tx - tx_mean\n # return tx_normal\n # tx_cov = K.dot(tx_normal.T, 
tx_normal) / (self.cols * self.rows - 1)\n # return tx_cov\n raise DeprecationWarning(\"deprecated, should use calculate_pre_cov to do 4D direct computation\")", "def cdf(self, x):\n from scipy.special import betainc\n sq_x = x * x\n return np.where(\n sq_x < 1., betainc(self.m / 2.0, self.n / 2.0, sq_x),\n np.ones_like(x))", "def multivariate_gauss_prob(observed, mean, covariance):\n\n return None", "def compute_measurement_covariance(jacobian, oldCovariance, sigmaObservation): \n\n return None", "def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()", "def test_gaussian_basis_hon(self):\n def row_generator():\n return [random.gauss(0, 1) for i in range(self.d)]\n\n self._test_sample_basis_hon(row_generator)", "def test_sample(self, normal_mock):\n # Setup\n instance = GaussianMultivariate(GaussianUnivariate)\n data = pd.DataFrame([\n {'A': 25, 'B': 75, 'C': 100},\n {'A': 30, 'B': 60, 'C': 250},\n {'A': 10, 'B': 65, 'C': 350},\n {'A': 20, 'B': 80, 'C': 150},\n {'A': 25, 'B': 70, 'C': 500}\n ])\n instance.fit(data)\n\n normal_mock.return_value = np.array([\n [0.1, 0.1, 0.1],\n [0.2, 0.2, 0.2],\n [0.4, 0.4, 0.4],\n [0.6, 0.6, 0.6],\n [0.8, 0.8, 0.8]\n ])\n\n expected_result = pd.DataFrame([\n {'A': 22.678232998312527, 'B': 70.70710678118655, 'C': 284.35270009440734},\n {'A': 23.356465996625055, 'B': 71.41421356237309, 'C': 298.7054001888146},\n {'A': 24.712931993250110, 'B': 72.82842712474618, 'C': 327.4108003776293},\n {'A': 26.069397989875164, 'B': 74.24264068711929, 'C': 356.116200566444},\n {'A': 27.425863986500215, 'B': 75.65685424949238, 'C': 384.8216007552586}\n ])\n\n # Run\n result = instance.sample(5)\n\n # Check\n assert result.equals(expected_result)\n\n assert normal_mock.called_once_with(\n np.zeros(instance.covariance.shape[0]),\n instance.covariance,\n 5\n )", "def gauss_convolve(array, sigma):\r\n\t##remove singleton dimesions and make sure values are floats\r\n\tarray = array.squeeze().astype(float)\r\n\t##allocate memory for result\r\n\tresult = np.zeros(array.shape)\r\n\t##if the array is 2-D, handle each trial separately\r\n\ttry:\r\n\t\tfor trial in range(array.shape[1]):\r\n\t\t\tresult[:,trial] = gaussian_filter(array[:, trial], sigma = sigma, order = 0, mode = \"constant\", cval = 0.0)\r\n\t##if it's 1-D:\r\n\texcept IndexError:\r\n\t\tif array.shape[0] == array.size:\r\n\t\t\tresult = gaussian_filter(array, sigma = sigma, order = 0, mode = \"constant\", cval = 0.0)\r\n\t\telse:\r\n\t\t\tprint \"Check your array dimenszions!\"\r\n\treturn result", "def test_cumulative_distribution_fit_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data.to_numpy())\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def rand_cov():\n c = uniform(-1, 1)\n return [[uniform(0, 1), c], [c, uniform(0, 1)]]", "def cov_matrix(X, mu):\n m, n = X.shape\n X_minus_mu = X - mu\n sigma = (1 / m) * (X_minus_mu.T).dot(X_minus_mu)\n\n return sigma", "def test_fit_distribution_selector(self):\n copula = GaussianMultivariate(distribution={\n 'column1': 'copulas.univariate.beta.BetaUnivariate',\n 'column2': 'copulas.univariate.gaussian_kde.GaussianKDE',\n })\n 
copula.fit(self.data)\n\n assert get_qualified_name(\n copula.univariates[0].__class__) == 'copulas.univariate.beta.BetaUnivariate'\n assert get_qualified_name(\n copula.univariates[1].__class__) == 'copulas.univariate.gaussian_kde.GaussianKDE'\n assert get_qualified_name(\n copula.univariates[2].__class__) == 'copulas.univariate.base.Univariate'", "def distribucion_slip( C, mu, N ):\n\n n_cols_cova = np.shape( C )[0] # C es una matriz cuadrada de dim x dim \n\n dim_mu = np.shape( mu ) # dimensiones de mu, para volver a rearmar matriz de slip\n mu = np.reshape( mu, ( n_cols_cova, ) )\n # se calculan los valores y vectores propios de la matriz de covarianza\n eig_val, eig_vecs = la.eig( C )\n eig_val = eig_val.real # valores propios (lambda_k)\n eig_vecs = eig_vecs.real # vectores propios (v_k) (columnas de eig_vecs)\n\n z = np.random.normal( 0, 1, n_cols_cova ) # distribucion gaussiana aleatoria z~N(0,1)\n\n # iniciacion array de slip\n S = np.ones( ( n_cols_cova, N ) )\n for i in range(N):\n S[:,i] = z[i]*np.sqrt( np.abs(eig_val[i]) )*np.real(eig_vecs[:,i])\n S = np.multiply( mu, np.exp( np.sum( S, axis = 1 ) ) )\n S = np.reshape( S, dim_mu )\n return S", "def test_cumulative_distribution_fit_df_call_np_array(self):\n # Setup\n copula = GaussianMultivariate(GaussianUnivariate)\n copula.fit(self.data)\n X = np.array([2000., 200., 1.])\n expected_result = 0.4550595153746892\n\n # Run\n result = copula.cumulative_distribution(X)\n\n # Check\n assert np.isclose(result, expected_result, atol=1e-5).all().all()", "def make_cov_cholesky(kernel: GPy.kern.Kern) -> np.ndarray:\n # Remark: Since we are doing wavetable synthesis, it is necessary to\n # consider periodic / non-periodic kernels separately in order to ensure\n # good continuation.\n samples = 44100 / 20\n xs = np.arange(samples + 1) * 2. * np.pi / samples\n if isinstance(kernel, GPy.kern.PeriodicExponential.__bases__[0]) or not config.good_continuation_regression:\n # print('Is periodic')\n X = np.array([xs[0]])[:, None]\n Y = np.array([0.])[:, None]\n else:\n X = np.array([xs[0], xs[-1]])[:, None]\n Y = np.array([0., 0.])[:, None]\n m = GPy.models.GPRegression(X, Y, kernel)\n m.Gaussian_noise = 0.0\n mean, cov = m.predict_noiseless(xs[:, None], full_cov=True)\n chol = GPy.util.linalg.jitchol(cov)\n return chol", "def covariance_regularization(self):\n return self._covariance_regularization", "def postfit_covariance(self) -> NONEARRAY:\n pass", "def compute_initial_covariance(jacobian, sigmaObservation):\n\n return None", "def postfit_covariance(self) -> NONEARRAY:\n return self._calc_covariance()", "def covariance(data_matrix):\n return np.asmatrix(np.cov(data_matrix, rowvar=0))" ]
[ "0.6819969", "0.66848814", "0.6387297", "0.63568807", "0.62007195", "0.61540526", "0.6126813", "0.6081666", "0.5988317", "0.5985816", "0.58470076", "0.57394606", "0.56781876", "0.56455225", "0.5579945", "0.5579599", "0.5552339", "0.5549332", "0.55221725", "0.55005264", "0.5462825", "0.546187", "0.5461743", "0.5460711", "0.5457115", "0.54508686", "0.54413354", "0.5441275", "0.543745", "0.5428301" ]
0.7020414
0
The User cannot create a month that already exist.
def clean(self, *args, **kwargs): name = self.cleaned_data.get('name') if name in Month.objects.values_list('name', flat=True): raise forms.ValidationError(f"The month of {name} already exist") return super(MonthForm, self).clean(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_creator_cannot_update_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def test_not_creator_cannot_update(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url,\n expected_url=expected_url,\n data=self.data)", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_create_user_invalid_id(self):\r\n print(\"Create user invalid id (already taken)\")\r\n u_id = 100\r\n username = \"newtestuser\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_error_user_already_exists(self):\n User.objects.create_user(self.data)\n client = Client()\n client.post('/register/', self.data)\n self.assertRaisesMessage(ValueError, 'user already exists')", "def test_invalid_month_orig(self):\n year, month, error = clean_year_month(2014, 3, 13)\n self.assertEqual(year, 2014)\n self.assertEqual(month, timezone.localtime(timezone.now()).month)\n self.assertEqual(error, ERROR)", "def test_monthly_report_error(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n month = 4567\n res = self.client().get(f'/monthly_report?month={month}', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], f'The date {month} does not match the format MM-YYYY')", "def test_milestone_add_error_already_exists(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('milestone add milestone1 \"%s\"'\n % self._test_date)\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def get_month():\n return handle_invalid_inputs(question_3, months)", "def test_user_not_in_group_cannot_create_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Tab.objects.all()), 0)", "def test_new_user_invalid_email(self):\n user_number_before = get_user_model().objects.count()\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(\n None,\n password=\"1234Test\"\n )\n user_number_after = get_user_model().objects.count()\n self.assertEqual(user_number_before, user_number_after)", "def test_create_activity_check_not_duplicate_activity(self):\n from .mockers import user_status as activity\n username = 'messi'\n self.create_user(username)\n self.testapp.post('/people/%s/activities' % 
username, json.dumps(activity), oauth2Header(test_manager), status=201)\n self.testapp.post('/people/%s/activities' % username, json.dumps(activity), oauth2Header(test_manager), status=200)", "def test_interval_load_duplicate_name_raises(self, months):\n register = NDimensionalRegister()\n register.register(IntervalSet(\"months\", months))\n with raises(ValueError):\n register.register(IntervalSet(\"months\", months))", "def test_create_user_invalid_type(self):\r\n print(\"Create user invalid type\")\r\n u_id = 3\r\n username = \"newtestuser\"\r\n password = \"password\"\r\n u_type = 5\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_not_creator_cannot_delete_tab(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('group_view', args=(self.group.pk,))\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Tab.objects.all()), 1)", "def test_generate_03_raise_exception(self):\n move = self.get_new_move(3)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n default_next_serial_number='code-xxx',\n ))\n wiz = form_wizard.save()\n with self.assertRaises(UserError):\n wiz.generate_serial_numbers()\n\n form_wizard.next_serial_count = 0\n # Must raise an exception because `next_serial_count` must be greater than 0.\n with self.assertRaises(ValidationError):\n form_wizard.save()", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_user_recreate(self):\n self.test_user_creation()\n\n with self.assertRaisesMessage(\n IntegrityError, \"UNIQUE constraint failed: auths_user.username\"\n ):\n UserModel.objects.create_user(\n username=\"saimer\"\n )", "def test_user_not_in_group_cannot_update_tab(self):\n\n utils.create_user_and_authenticate(self)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url, self.data)", "def error_imaginary_date(user: discord.User, date_arg: str) -> str:\n return (\n f\"{user.mention}, you might need to check you're calendar!\"\n f\" '{date_arg}' doesn't exist!\"\n )", "def test_member_already_exists(self):\n self.login_as(\"bob\")\n\n with self.assertNumQueries(6):\n response = self.client.post(self.url, self.payload)\n self.assert_validation_failed(response, data={\n \"non_field_errors\": [\"You are already a member of this group.\"]\n })\n self.assertEqual(Membership.objects.count(), self.num_memberships)", "def test_month_from_number_raises_error_if_not_1_to_12(self):\r\n\r\n with self.assertRaises(ValueError, msg=\"Should raise a value error if month is 0\") as ctx:\r\n cds.month_from_number(0)\r\n with self.assertRaises(ValueError, msg=\"Should raise a value error if month is 13\") as ctx:\r\n cds.month_from_number(13)\r\n with self.assertRaises(ValueError, msg=\"Should raise a value error if month is 110\") as ctx:\r\n cds.month_from_number(110)", "def test_duplicate_user(self, mapp, existing_user_id):\n\n mapp.create_user(user=existing_user_id, 
password=1234,\n email=existing_user_id + \"@example.com\", code=409)", "def test_not_creator_cannot_delete(self):\n\n logged_user = utils.create_user_and_authenticate(self)\n self.group.users.add(logged_user)\n expected_url = reverse('my_groups_view')\n\n utils.test_cannot_access(self, self.url, expected_url)\n self.assertEqual(len(Group.objects.all()), 1)", "def test_registration_when_user_already_exists(self):\n # register the user the first time\n self.register_user()\n # register the same user the second time\n result = self.client().post(AuthTestCase.registration, data=self.user)\n response_result = json.loads(result.data.decode())\n self.assertEqual(result.status_code, 409)\n self.assertEqual(response_result['message'], \"user already exists\")", "def test_create_form_already_exists(self):\n\n self._create_test_survey()\n with pytest.raises(SurveyFormNameAlreadyExists):\n self._create_test_survey()", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_save_slot_site_1(self):\n business = BUSINESS_FACTORY.create_business()\n with self.assertRaises(ValidationError) as context_manager:\n Slot.objects.create(site_id=1, business_id=business.id,\n start_date = datetime.date.today(),\n end_date = datetime.date.today() + datetime.timedelta(1))\n self.fail('Invalid slot saved.')\n LOG.debug(context_manager.exception)", "def check_dates(self, kwargs):\n month = int(kwargs['month'])\n if (int(kwargs['year_from']) >= int(kwargs['year_to'])) or \\\n (month < 1 or month > 12):\n # kdyby datumy byly nejake dodrbane, tak se sverime do pece autoredirectu\n return HttpResponseRedirect(reverse('admin_redir'))\n return None", "def test_as_ignores_mau(self) -> None:\n\n # Create and sync so that the MAU counts get updated\n token1 = self.create_user(\"kermit1\")\n self.do_sync_for_user(token1)\n token2 = self.create_user(\"kermit2\")\n self.do_sync_for_user(token2)\n\n # check we're testing what we think we are: there should be two active users\n self.assertEqual(self.get_success(self.store.get_monthly_active_count()), 2)\n\n # We've created and activated two users, we shouldn't be able to\n # register new users\n with self.assertRaises(SynapseError) as cm:\n self.create_user(\"kermit3\")\n\n e = cm.exception\n self.assertEqual(e.code, 403)\n self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED)\n\n # Cheekily add an application service that we use to register a new user\n # with.\n as_token = \"foobartoken\"\n self.store.services_cache.append(\n ApplicationService(\n token=as_token,\n id=\"SomeASID\",\n sender=\"@as_sender:test\",\n namespaces={\"users\": [{\"regex\": \"@as_*\", \"exclusive\": True}]},\n )\n )\n\n self.create_user(\"as_kermit4\", token=as_token, appservice=True)" ]
[ "0.60135305", "0.58874893", "0.5800531", "0.579681", "0.5655037", "0.5562427", "0.55525297", "0.55446094", "0.5531289", "0.5445164", "0.54375124", "0.54319006", "0.54038316", "0.538087", "0.5334421", "0.5327298", "0.531028", "0.5302743", "0.53004247", "0.5300003", "0.5290643", "0.52841127", "0.5263896", "0.5263074", "0.5259451", "0.5248742", "0.5237648", "0.52317095", "0.5220796", "0.5219653" ]
0.6488986
0
Test the help file
def test_help(self): help_file = os.path.join(cwd, indir, "r5json_help") help_text = StringIO() with redirect_stdout(help_text): with self.assertRaises(HelpPrinted): main(["--help"]) if os.path.exists(help_file): with open(help_file) as f: expected = f.read() self.assertEqual(expected.strip(), help_text.getvalue().strip()) else: with open(help_file, 'w') as f: f.write(help_text.getvalue()) self.fail(f"{help_file} created - rerun test")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_help(self):\n help_file = os.path.join(cwd, indir, \"rdfc_help\")\n help_text = StringIO()\n with redirect_stdout(help_text):\n with self.assertRaises(HelpPrinted):\n main([\"--help\"])\n if os.path.exists(help_file):\n with open(help_file) as f:\n expected = f.read()\n self.assertEqual(expected.strip(), help_text.getvalue().strip())\n else:\n with open(help_file, 'w') as f:\n f.write(help_text.getvalue())\n self.fail(f\"{help_file} created - rerun test\")", "def test_help_menu(run):\n out, _err = run(dork.cli.help_menu)\n assert 'Help' in out, 'Help wasnt found'", "def help():", "def test_help(self):\n rc, stdout, _, msg = OIM().request('--help')\n self.assertEqual(rc, 0, \"Bad return code when requesting help\\n%s\" % msg)\n self.assert_(re.search(r'[Uu]sage:', stdout), msg)", "def test_help_ok(self):\n from trac import __version__\n\n test_name = sys._getframe().f_code.co_name\n d = {'version': __version__,\n 'date_format_hint': get_date_format_hint()}\n expected_results = self.expected_results[test_name] % d\n rv, output = self._execute('help')\n self.assertEqual(0, rv)\n self.assertEqual(expected_results, output)", "def test_handle_help(self):\r\n ret, code = self.testcommand.handle(\"project help\", user)\r\n self.assertEqual(ret, self.testcommand.get_help())\r\n self.assertEqual(code, 200)", "def test_help(self):\n run_nbgrader([\"generate_feedback\", \"--help-all\"])", "def help(self):", "def help(self):", "def help():\n \n pass", "def test_help(self):\n run_nbgrader([\"quickstart\", \"--help-all\"])", "def test_handle_help(self):\n ret, code = self.testcommand.handle(\"team help\", user)\n self.assertEqual(ret, self.testcommand.get_help())\n self.assertEqual(code, 200)", "def test_help(self):\n bin_path = \"tools/drake_visualizer\"\n self.assertTrue(isfile(bin_path), bin_path)\n text = subprocess.check_output([bin_path, \"--help\"], encoding=\"utf8\")\n\n # N.B. 
This should be kept in sync with\n # `drake_visualizer_installed_help_test`.\n print(text)\n # Test for nominal help string.\n self.assertIn(\"usage: drake-visualizer \", text)\n self.assertNotIn(\n \"drake-visualizer: error: unrecognized arguments\", text)\n # Test for modifications in help text.\n self.assertIn(\"--use_builtin_scripts\", text)\n self.assertIn(\"Options: all,\", text)", "def test_cli_help(self):\n output = self.update_command('-h')", "def show_help():\n pass", "def quick_test():\n do_command('Help: Command=Help')\n do_command('Help: Command=\"GetInfo\"')\n #do_command('SetPreference: Name=GUI/Theme Value=classic Reload=1')", "def test_help(self):\n\n process = subprocess.Popen(\n [\"python\", \"./echo.py\", \"-h\"],\n stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n usage = open(\"./USAGE\", \"r\").read()\n\n self.assertEquals(stdout, usage)", "def help():\n print(UI.HELP)", "def test_cli_help(run):\n msg = []\n _, err = run(dork.cli.the_predork_cli, msg, *(\"\", \"-h\"))\n assert \"usage: \" in msg[0], \\\n \"Failed to run the cli.main method: {err}\".format(err=err)", "def help(self):\n pass", "def help(self):\n pass", "def printhelp():", "def test_cli_help(run):\n\n out, err, mocked_input = run(dork.cli.main, \"-h\")\n assert \"usage:\" in out\n assert err == \"\"\n assert mocked_input.call_count == 0", "def test_cli_help():\n runner = CliRunner()\n result = runner.invoke(main, [\"--help\"], terminal_width=80)\n assert result.exit_code == 0\n assert \"Usage: duffy\" in result.output", "def test_generate_help_text(self):\n self.shell.completer = None\n description, example = self.shell.generate_help_text('')\n self.assertEqual(description, '')\n self.assertEqual(example, '')\n\n self.shell.completer = TestCompleter()\n description, example = self.shell.generate_help_text('friendship --calls')\n self.assertEqual(description, '--calls:\\n' + 'call the friends')\n self.assertEqual(example, space_examples('use with care', 25, 1))", "def test_help(self):\n\n # Run the command `python ./echo.py -h` in a separate process, then\n # collect its output.\n process = subprocess.Popen(\n [\"python\", \"./echo.py\", \"-h\"],\n stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n usage = open(\"./USAGE\", \"r\").read()\n\n self.assertEquals(stdout, usage)", "def testHelp(self):\n d = self.runCommand('?')\n\n helpText = cftp.StdioClient(None).cmd_HELP('').strip()\n if isinstance(helpText, unicode):\n helpText = helpText.encode(\"utf-8\")\n d.addCallback(self.assertEqual, helpText)\n return d", "def test_help(self):\n run_nbgrader([\"fetch\", \"--help-all\"])", "def test_help(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"help\")\n s = \"\"\"\nDocumented commands (type help <topic>):\n========================================\nEOF all count create destroy help quit show update\n\n\"\"\"\n self.assertEqual(s, f.getvalue())", "def help(self):\n\t\treturn" ]
[ "0.8088576", "0.8038368", "0.8033258", "0.78555655", "0.78202975", "0.78139514", "0.7774096", "0.7725733", "0.7725733", "0.7713123", "0.770867", "0.7704543", "0.763082", "0.7591885", "0.75887424", "0.75694066", "0.7549705", "0.75341034", "0.75311893", "0.75053424", "0.75053424", "0.7489418", "0.74841785", "0.7465808", "0.7435661", "0.74330235", "0.7430683", "0.742746", "0.74125093", "0.7410902" ]
0.8042502
1
Download fname from the FHIR server and save it in target_directory if necessary. If it already exists, just use it
def from_web(self, fname: str, typ: str, target_directory: str) -> str: target = os.path.join(target_directory, fname) if not os.path.exists(target): # f_url = FHIR_SERVER + typ + '/' + fname f_url = FHIR_SERVER + fname resp = requests.get(f_url) if resp.ok: with open (target, 'w') as f: f.write(resp.text) else: self.fail(f"{f_url}: {resp.reason}") return target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unavoidable_download_method(self, target, name):\n # Get path to file\n file_path = os.path.join(self.work_dir, name)\n\n # Create necessary directories if not present\n self.mkdir_p(self.work_dir)\n\n # Check if file exists, download if not presente\n if not os.path.exists(file_path):\n try:\n subprocess.check_call(['curl', '-fs', self.input_urls[name], '-o', file_path])\n except subprocess.CalledProcessError:\n raise RuntimeError('\\nNecessary file could not be acquired: {}. Check input URL')\n except OSError:\n raise RuntimeError('Failed to find \"curl\". Install via \"apt-get install curl\"')\n\n assert os.path.exists(file_path)\n\n # Update FileStoreID\n target.updateGlobalFile(self.ids[name], file_path)\n\n return file_path", "def download_file(fname, temp_dir, force=False):\n target_dir = temp_dir\n target_fname = os.path.join(target_dir, fname)\n\n if force or not os.path.isfile(target_fname):\n url = urljoin(datasets_url, fname)\n with urllib.request.urlopen(url) as response, open(target_fname, 'wb') as out_file:\n logger.info(str(url) + ' --> ' + target_fname)\n shutil.copyfileobj(response, out_file)\n\n return target_fname", "def download_target_url(target, fname):\n r = requests.get(target)\n if r.ok:\n with open(fname, 'w') as f:\n f.write(r.text)\n print(f\"Wrote {len(r.text)} chars to {fname}.\")", "def _maybe_download(self, filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n return filepath", "def download(self, dest, overwrite=False):\n dest = os.path.abspath(dest)\n try:\n local = get_local(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n local = None\n else: # Something exists here.\n if local.hash() == self.hash: # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.uri, local.path))\n return\n if not overwrite:\n raise ValueError(\"%s already exists\" % local.path)\n\n # To avoid any weird overwriting behaviour in the case of errors, we'll\n # download to a different location first, then move to dest afterwards.\n tmp_dest = os.path.join(\n pdbox.TMP_DOWNLOAD_DIR,\n os.path.basename(dest),\n )\n while os.path.exists(tmp_dest): # Make sure the temp name is unique.\n tmp_dest += \"_\"\n\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Downloaded %s to %s\" % (self.uri, dest))\n return None\n\n # TODO: Progress bars.\n meta = execute(pdbox.dbx.files_download_to_file, tmp_dest, self.path)\n pdbox.debug(\"Metadata response: %s\" % meta)\n\n if not os.path.isdir(os.path.dirname(dest)):\n # Create the parent directories of dest.\n os.makedirs(os.path.dirname(dest))\n\n if not pdbox._args.get(\"dryrun\"):\n # os.rename overwrites files just fine, but not directories.\n if local and isinstance(local, LocalFolder):\n shutil.rmtree(local.path)\n # Move the file from the temp location to dest.\n os.rename(tmp_dest, dest)\n\n pdbox.info(\"Downloaded %s to %s\" % (self.uri, dest))\n return LocalFile(dest) # Return the newly created file.", "def maybe_download(filename, work_directory, source_url):\n\tif not gfile.Exists(work_directory):\n\t\tgfile.MakeDirs(work_directory)\n\tfilepath = os.path.join(work_directory, filename)\n\tif not gfile.Exists(filepath):\n\t\ttemp_file_name, _ = 
urlretrieve_with_retry(source_url)\n\t\tgfile.Copy(temp_file_name, filepath)\n\t\twith gfile.GFile(filepath) as f:\n\t\t\tsize = f.size()\n\t\tprint('Successfully downloaded', filename, size, 'bytes.')\n\treturn filepath", "def maybe_download(filename):\n\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.Size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath", "def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath", "def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath", "def download(url, fname, directory):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n else:\n print(\"Directory exists: %s\" % directory)\n filepath = os.path.join(directory, fname)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (fname, filepath))\n local_fname, _ = request.urlretrieve(url + fname, filepath)\n statinfo = os.stat(filepath)\n print(\"Successfully downloaded %s bytes %s\\n\" % (fname, statinfo.st_size))\n else:\n print(\"File %s exists in %s\\n\" % (fname, filepath))\n return filepath", "def download_if_not_exist(self):\n for (fname, furl) in cornell_file_urls:\n # dir_path = os.path.dirname(os.path.realpath(__file__))\n input_folder = '{input_dir}/cornell'.format(input_dir=self.input_dir)\n full_dirname = input_folder\n full_fname = '/'.join([full_dirname, fname])\n if not file_exists(full_fname):\n remote_file = urlopen(furl)\n data = remote_file.read()\n remote_file.close()\n # Try creating the dir\n try_create_dir(full_dirname)\n print('download if not exist fname:', fname, 'url:', furl)\n # Write the file\n with open(full_fname, 'wb') as f:\n f.write(data)", "def maybe_download(filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n return filepath", "def download_model(source, target, filename):\n if not os.path.exists(target):\n os.mkdir(target) \n target_file = str(Path(target).joinpath(filename))\n if os.path.exists(target_file):\n print('model already exists, skipping download')\n return\n print(\"Downloading from {} to {}\".format(source, target))\n wget.download(source, target_file) \n print(\"\\nDone!\")", "def download_addon(self, url, target_path):\n try:\n filename = url.split('?')[0].rstrip('/').rsplit('/', 1)[-1]\n target_path = os.path.join(target_path, 
filename)\n\n print \"Downloading %s to %s\" % (url, target_path)\n urllib.urlretrieve(url, target_path)\n\n return target_path\n except Exception, e:\n print e", "def fetch(self, location=None, conn_timeout=None):\r\n if self.local and (location is None or os.path.dirname(self._url.path) == location):\r\n return self._url.path\r\n location = location or safe_mkdtemp()\r\n target = os.path.join(location, self.filename)\r\n if os.path.exists(target):\r\n return target\r\n try:\r\n with contextlib.closing(self.fh(conn_timeout=conn_timeout)) as url_fp:\r\n safe_mkdir(os.path.dirname(target))\r\n with open(target, 'wb') as fp:\r\n fp.write(url_fp.read())\r\n except (FetchError, IOError) as e:\r\n raise self.UnreadableLink('Failed to fetch %s to %s: %s' % (self.url, location, e))\r\n return target", "def download(filename, work_directory, source_url, overwrite=False):\n\n if not gfile.Exists(work_directory):\n gfile.MakeDirs(work_directory)\n\n filepath = os.path.join(work_directory, filename)\n\n if overwrite or not gfile.Exists(filepath):\n _filename, _ = urlretrieve_with_retry(source_url + filename)\n #print('_filename:', _filename)\n gfile.Copy(_filename, filepath, overwrite=overwrite)\n with gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n\n return filepath", "def download_file(target_url):\n resp = requests.get(target_url)\n with tempfile.NamedTemporaryFile('wb+', delete=False) as f:\n file_name = f.name\n f.write(resp.content)\n return file_name", "def save(self, url, destination, payload={}, overwrite=False):\n head_args = self._fmt_request_args(\"GET\", self.headers, url, payload)\n head_args.pop(\"method\")\n head_args[\"verify\"] = False\n h = requests.head(**head_args)\n header = h.headers\n content_type = header.get(\"content-type\")\n\n # Figure out the local file name and check if it's available.\n local_phile_name = self._determine_save_file_name(url, content_type, destination)\n if os.path.exists(local_phile_name) and not overwrite:\n logging.error(\"File %s already exists, use carpetbag.save(overwrite=True) to overwrite.\" % local_phile_name)\n raise errors.CannotOverwriteFile\n\n # Check content length\n content_length = header.get(\"content-length\", None)\n if content_length.isdigit():\n content_length = int(content_length)\n if content_length > self.max_content_length:\n logging.warning(\"Remote content-length: %s is greater then current max: %s\")\n return False\n\n # Get the file.\n response = self.get(url, payload=payload)\n\n open(local_phile_name, \"wb\").write(response.content)\n\n return local_phile_name", "def _update_full_path(self):\n if self.download_dir and self.download_filename:\n self._download_path = os.path.join(self.download_dir,\n self.download_filename)\n else:\n self._download_path = None", "def _process_resource(self, url):\n url_parts = urlparse.urlsplit(url)\n rel_path = url_parts.path[1:]\n fs_path = os.path.join(self.fileserver_path, rel_path)\n self.logger.info('Downloading {0} to {1}'.format(url, fs_path))\n self._execute_command('curl --create-dirs -Lo {0} {1}'\n .format(fs_path, url), retries=2)\n url = url.replace(url_parts.netloc, self.fs_base_url)\n url = url.replace(url_parts.scheme, 'http')\n return url", "def download_target(self, target, destination_directory):\n\n # Do the arguments have the correct format? 
\n # This check ensures the arguments have the appropriate \n # number of objects and object types, and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if the check fail.\n tuf.formats.TARGETFILE_SCHEMA.check_match(target)\n tuf.formats.PATH_SCHEMA.check_match(destination_directory)\n\n # Extract the target file information.\n target_filepath = target['filepath']\n trusted_length = target['fileinfo']['length']\n trusted_hashes = target['fileinfo']['hashes']\n\n # get_target_file checks every mirror and returns the first target\n # that passes verification.\n target_file_object = self.get_target_file(target_filepath, trusted_length,\n trusted_hashes)\n \n # We acquired a target file object from a mirror. Move the file into\n # place (i.e., locally to 'destination_directory').\n destination = os.path.join(destination_directory, target_filepath)\n destination = os.path.abspath(destination)\n target_dirpath = os.path.dirname(destination)\n if target_dirpath:\n try:\n os.makedirs(target_dirpath)\n except OSError, e:\n if e.errno == errno.EEXIST: pass\n else: raise\n else:\n logger.warn(str(target_dirpath)+' does not exist.')\n\n target_file_object.move(destination)", "def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)", "def maybe_download(url, file_name, work_directory):\n\tif not os.path.exists(work_directory):\n\t\tos.mkdir(work_directory)\n\t\t\n\tfile_path = os.path.join(work_directory, file_name)\n\n\tif not os.path.exists(file_path):\n\t\tfile_path, _ = urllib.request.urlretrieve(url, file_path)\n\t\tstatinfo = os.stat(file_path)\n\t\tprint('Successfully downloaded', file_name, statinfo.st_size, 'bytes.')\n\t\n\tprint(\"{} existed\".format(file_path))\n\n\treturn file_path", "def download(self, url: str, dest: PathLike, force: bool = False):", "def __download_pretrained(self, fname: str, fdir: str):\n download_url = self._fastlinks[\"url\"] + fname\n r = requests.get(download_url, stream=True)\n with open(fdir, \"wb\") as downfile:\n total_length = int(r.headers.get('content-length'))\n tt = float(\"{:.2f}\".format(total_length / 1024 ** 2))\n for ch in tqdm.tqdm(iterable=r.iter_content(chunk_size=1024 ** 2), total=tt, unit='MB'):\n if ch:\n downfile.write(ch)", "def _maybe_download(self, url):\n filename = os.path.basename(url)\n download_path = os.path.join(self._model_dir, filename)\n if os.path.exists(download_path):\n return download_path\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n urllib.request.urlretrieve(url, download_path, _progress)\n statinfo = os.stat(download_path)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return download_path", "def download(self,\n targetdir: Union[str, Path],\n overwrite: bool = False,\n verbose: bool = False):\n targetname = Path(targetdir, self.filename)\n \n # Check if targetname exists\n if overwrite or not targetname.exists():\n \n # Get the URL\n r = requests.get(self.url)\n \n # Print message if URL does not exist\n if r.status_code == 404:\n print(f'File URL not found: {self.url}')\n return False\n \n else:\n # Raise any other request errors\n r.raise_for_status()\n\n # Save downloaded content\n with open(targetname, 'wb') as f:\n f.write(r.content)\n if verbose:\n print(f'{self.filename} downloaded to {targetdir}')\n return True\n else:\n # Skip files that already exist\n if 
verbose:\n print(f'{self.filename} already in {targetdir}')\n return False", "def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath", "def download(url, output, encoding, insrs, format_name):\n\n folder = download_data(url, encoding)\n joined_file = join_files(folder)\n transform(joined_file, output, insrs, format_name)\n\n shutil.rmtree(folder)\n os.remove(joined_file)\n\n if not os.path.isfile(output):\n raise Error(\"Output file not created, the whole process failed\")\n else:\n logging.info(\"File %s successfuly created\" % output)", "def download_file(self, parsed_event, input_dir_path):" ]
[ "0.68348867", "0.67281526", "0.6612575", "0.6554521", "0.6498222", "0.64975613", "0.6489902", "0.64766484", "0.64766484", "0.64489305", "0.64144224", "0.6401153", "0.63284457", "0.63183427", "0.63093764", "0.6271924", "0.62675196", "0.6264412", "0.6243814", "0.6232468", "0.6226773", "0.6186418", "0.6176281", "0.6169277", "0.6164012", "0.6160043", "0.6147625", "0.6123961", "0.6114501", "0.6110242" ]
0.7087589
0
get the index of the vertex under point if within epsilon tolerance
def get_index_under_point(self, event): xy = np.asarray(list(zip(self.xs, self.ys))) xyt = self.line.get_transform().transform(xy) xt, yt = xyt[:, 0], xyt[:, 1] d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2) pt_idx = np.argmin(d) if d[pt_idx] >= self.max_pixels_from_vertex: pt_idx = None return pt_idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getVertex(self, x, y, z, epsilon=COMPARISON_EPSILON):\n for v in self.vertices:\n if (v.x - x)**2 + (v.y - y)**2 + (v.z - z)**2 <= epsilon**2:\n return v\n raise ValueError('No vertex found')", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def nearest_vertex_to(self, point):\n distances = self.distances_to(point)\n idx = np.argmin(distances)\n return idx", "def findidx(X, v, tol=1e-3):\n\tloc = -1\n\tdiff = 1e15 # Take a big difference\n\tn = len(X)\n\n\tfor i in xrange(n):\n\t\tndiff = abs(X[i]-v)\n\t\tif ndiff <= tol and ndiff < diff:\n\t\t\tloc = i\n\t\t\tdiff = ndiff\n\t\n\treturn loc", "def getIndexPoint(event=None, plane=None, epsilon=2):\n\n if event is None:\n return None\n if plane is None:\n return None\n if len(plane) == 0:\n return None\n\n xt = np.asarray([i[1] for i in plane])\n yt = np.asarray([i[0] for i in plane])\n d = np.sqrt((xt - event.xdata)**2 / 16 + (yt - event.ydata)**2)\n index = d.argsort()[:1][0]\n # position to far away\n if d[index] >= epsilon:\n return None\n index = int(index)\n return index", "def get_index(self, u):\n if u == self.grid[-1]: # check if u equals last knot\n# index = len(self.grid) - 2 # pick next to last index\n index = (self.grid < u).argmin() - 1\n else:\n index = (self.grid > u).argmax() - 1\n return index", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def next_in_hull(p, v, L): \r\n N = normalize(p, L)\r\n if N != []:\r\n q = N[0]\r\n index = 0\r\n for k in range(1, len(N)):\r\n if (N[k] - q).dot(v) >= 0: # points on support line included\r\n q = N[k]\r\n index = k\r\n \r\n return index", "def get_index_of_surface_gate(data, setup={}):\n alts = data['alt']\n return np.argmin(np.abs(alts), 1)", "def checkDimension(neighbour, current_point):\n for i in range(3):\n delta = abs(neighbour[i] - current_point[i])\n if delta > 0:\n return i", "def contains ( self, pos ):\n \n poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])\n dists = poly.is_inside(pos[0,:],pos[1,:]) \n if self.include_border:\n inds = dists >= -self.abs_tol\n else:\n inds = dists > 0\n \n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds", "def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1", "def Ni_find(t):\r\n return ep(t) - 1", "def test_find_triangle(self):\n points = np.array([[2.435, -3.37], [2.435, -1.82], [2.635, -2.], [2.535, -1.7]])\n connectivity_list = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5]], dtype=np.intp)\n point = np.array([2.6, -1.9])\n self.assertEqual(1, find_triangle(point, points, connectivity_list))\n point = np.array([3., 1.]) # outside of defined vertices\n self.assertEqual(-1, find_triangle(point, points, connectivity_list))", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def calc_nearest_ind(self, robot_pose):\n pass", "def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit", "def find_third_point(a, b, pts_list, edges):\n found = 0\n minimum = exp(100) #this is dirty\n c_index = -1\n pt_index = -1\n for c_point in pts_list:\n c_index += 1\n if c_index != a and c_index != b and is_on_the_left(c_index, a, b, pts_list):\n edge_intersects = 
\\\n edge_intersects_edges((a, c_index), pts_list, edges) or \\\n edge_intersects_edges((b, c_index), pts_list, edges)\n if not edge_intersects:\n crit = criterion(a, b, c_index, pts_list)\n if crit < minimum:\n minimum = crit\n pt_index = c_index\n found = 1\n if found == 0:\n raise TriangulationError(\"ERROR: Optimal point not found in find_third_point().\")\n return pt_index", "def _insertion_index(points, point):\n distance = sys.float_info.max\n index = None\n begin = points[-1]\n for i, p in enumerate(points):\n temp = _distance_to_line(begin, p, point)\n if temp < distance:\n distance = temp\n index = i\n begin = p\n return index", "def _first_index_with_bigger_neighbour(P):\n i = len(P) - 1\n while i > 0 and P[i-1] >= P[i]:\n i -= 1\n return i", "def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6", "def isinsidelineXY(l,p):\n\n return linePointXY(l,p,distance=True) < epsilon", "def dichotomous_search(loss_function: rosenbrock, start: point, direction: list, epsilon=0.1) -> float:\n a, b = advance_retreat_method(loss_function, start, direction)\n\n # find the minimum\n e = epsilon / 3\n p, q = (a + b) / 2 - e, (a + b) / 2 + e\n while abs(a - b) > epsilon:\n f_p = loss_function.f(start + point(direction[0] * p, direction[1] * p))\n f_q = loss_function.f(start + point(direction[0] * q, direction[1] * q))\n if f_p < f_q:\n b = q\n else:\n a = p\n p, q = (a + b) / 2 - e, (a + b) / 2 + e\n\n return (a + b) / 2", "def d_midpoint(edge):\n v0, v1 = EDGES[edge]\n v0_pos = VERTICES[v0]\n v1_pos = VERTICES[v1]\n return ((x+y) for (x,y) in zip(v0_pos, v1_pos))", "def get_overland_vector(catchpoints, closest, tol = 0.1, min_slope = 0.00001):\n\n length = get_distance_vector(catchpoints, closest)\n slope = (catchpoints[:,2] - closest[:,2]) / length / 100000\n\n for l, s in zip(length, slope):\n if l < tol: l, s = tol, min_slope\n\n return length / 2., slope", "def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1", "def findNeighbor(cur, dataList, eps):\n neighbors = []\n for pt in dataList:\n if (cur.x - pt.x) ** 2 + (cur.y - pt.y) ** 2 <= eps ** 2:\n neighbors.append(pt)\n return neighbors", "def query_region(self, point):\n result = []\n indexes = []\n for didx, dpoint in enumerate(self.data):\n if dpoint != point:\n if self.l2_distance(dpoint, point) <= self.eps:\n result.append(dpoint)\n indexes.append(didx)\n return result, indexes", "def TIN_z(x, y, con_ver, nbr_ver):\n elev_TIN = sc.griddata((con_ver[:,0], con_ver[:,1]), con_ver[:,2], (x, y), method='linear')\n if not(np.isnan(elev_TIN)):\n elev_i = elev_TIN\n else:\n print(\"elev_TIN is nan: evaluating else loop\")\n d_nbr = np.zeros(3)\n for n in range(0, 3):\n d_nbr[n] = ((x-nbr_ver[n][0])**2 + (y-nbr_ver[n][1])**2)**0.5\n nearest_ver = nbr_ver[d_nbr.argmax(0)]\n elev_i = nearest_ver[2]\n return elev_i", "def get_nearest_index(self, x_value: float) -> int:\n return int(np.argmax(self.x >= x_value))" ]
[ "0.66811496", "0.6199875", "0.6109701", "0.60955787", "0.60679823", "0.60257995", "0.59940207", "0.59482193", "0.59161484", "0.5886966", "0.58000994", "0.57858783", "0.57689637", "0.5733948", "0.5703687", "0.56951296", "0.56856495", "0.56644243", "0.5635767", "0.5627456", "0.5625513", "0.5611119", "0.5601545", "0.5597786", "0.5512719", "0.5502611", "0.5495094", "0.5491441", "0.54882765", "0.5481912" ]
0.6639644
1
Returns True if the two players passed as arguments have played each other already. Queries the matches database looking for the lowest player id as player_1_id because we wrote reportMatch() to always sort the player ids before creating a new row. This eliminates us having to look for the pair in either order in this function.
def havePlayedPreviously(player1, player2): # Assign player ids in a way that'll allow us to search for the lowest # first player1ID = min(player1, player2) player2ID = max(player1, player2) # Query the database for this pairing dbconnection = connect() dbcursor = dbconnection.cursor() # Use of 'COALESCE' returns zero instead of 'None' when query returns no # rows dbcursor.execute(""" SELECT COALESCE(COUNT(*), 0) FROM matches WHERE player_1_id = " + str(player1ID) + " AND player_2_id = " + str(player2ID) """) # Assign only the first value in the first tuple to avoid error previousMatches = dbcursor.fetchall()[0][0] dbconnection.close() # Return True or False, depending on whether a previous match exists or not if (previousMatches > 0): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def played(p1, p2):\n conn, cur = connect()\n if p1 > p2:\n p1, p2 = p2, p1\n cur.execute(\"SELECT * FROM MATCHES WHERE P1 = %s and P2 = %s;\", (p1, p2,))\n row = cur.fetchone()\n conn.close()\n return row is not None", "def check_tie(self, player1, player2):\n if self.check_win(player1) or self.check_win(player2):\n return False\n return self.check_grid_full()", "def check_if_two_players_on_team(self, member_one, member_two):\n\n try:\n self._logger.debug(\"Checking if players are already on team together\")\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n\n cursor.execute(\"SELECT player_id FROM player WHERE \\\nfirst_name = '{0}' AND last_name = '{1}' AND nickname = '{2}'\".format(\n member_one[0], member_one[1], member_one[2]))\n player_one_id = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT player_id FROM player WHERE \\\nfirst_name = '{0}' AND last_name = '{1}' AND nickname = '{2}'\".format(\n member_two[0], member_two[1], member_two[2]))\n player_two_id = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT team FROM player_team_xref WHERE \\\nplayer = {0}\".format(player_one_id))\n teams = cursor.fetchall()\n\n #TODO will give false positive if members are on a team of three\n for team in teams:\n cursor.execute(\"SELECT player FROM player_team_xref WHERE \\\nteam = {0}\".format(team[0]))\n players = cursor.fetchall()\n for player in players:\n if player[0] == player_two_id:\n return team[0]\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return False", "def win(player1, player2):\n if(player1 == 1 and player2 == 3) or (player1 == 2 and player2 == 1) \\\n or (player1 == 3 and player2 == 2):\n return True", "def reportMatch(player1, player2, winner = None):\n #Check for Bye matchup (player1=player2)\n bye_match = False\n if player1 == player2:\n winner = player1\n bye_match = True\n\n #Generate random winner if no winner param is passed in\n if winner is None:\n rand = random.random()\n if rand < 0.5:\n winner = player1\n else:\n winner = player2\n\n conn, cur = connect()\n #Insert record of match\n query = \"\"\"INSERT INTO matches (player1_id, player2_id, winner)\n VALUES (%s, %s, %s)\"\"\"\n params = (player1, player2, winner,)\n try:\n cur.execute(query, params)\n except:\n print(\"Error encountered when inserting match record into the database\")\n\n #Save record of player having a bye in Players table\n if bye_match:\n query = \"UPDATE players SET had_bye = TRUE WHERE player_id = %s;\"\n param = (player1,)\n try:\n cur.execute(query, param)\n except:\n print(\"Error encountered when updating player's Bye round status.\")\n conn.commit()\n conn.close()", "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "def playGamePartTwo(playerOne, playerTwo, historyPlayerOne, historyPlayerTwo):\n\n #print(\"#########################################################################################################\")\n #print(f\"Entry into game: playerOne: {playerOne}, playerTwo: {playerTwo}, historyPlayerOne: {historyPlayerOne}, historyPlayerTwo:{historyPlayerTwo}\")\n\n playerOneHand = copy.deepcopy(playerOne)\n playerTwoHand = copy.deepcopy(playerTwo)\n\n index 
= 1\n while(playerOneHand and playerTwoHand):\n\n #print(f\"Round: {index}\")\n #print(f\"playerOneHand: {playerOneHand}\")\n #print(f\"playerTwoHand: {playerTwoHand}\")\n\n if ''.join(str(e) for e in playerOneHand) in historyPlayerOne or ''.join(str(e) for e in playerTwoHand) in historyPlayerTwo:\n #print(\"Repeat of history\")\n #print(f\"playerOneHand: {playerOneHand}, historyPlayerOne: {historyPlayerOne}\")\n #print(f\"playerTwoHand: {playerTwoHand},historyPlayerTwo:{historyPlayerTwo}\")\n return [\"win\"], []\n break\n else:\n historyPlayerOne.add(''.join(str(e) for e in playerOneHand))\n historyPlayerTwo.add(''.join(str(e) for e in playerTwoHand))\n\n playerOneCard = playerOneHand.pop(0)\n playerTwoCard = playerTwoHand.pop(0)\n\n if len(playerOneHand) >= playerOneCard and len(playerTwoHand) >= playerTwoCard:\n playerOneResult, playerTwoResult = playGamePartTwo(playerOneHand[:playerOneCard], playerTwoHand[:playerTwoCard], set(), set() )\n\n if playerOneResult:\n playerOneHand.append(playerOneCard)\n playerOneHand.append(playerTwoCard)\n else:\n playerTwoHand.append(playerTwoCard)\n playerTwoHand.append(playerOneCard)\n\n else:\n if playerOneCard > playerTwoCard:\n #print(\"Player one wins\")\n playerOneHand.append(playerOneCard)\n playerOneHand.append(playerTwoCard)\n else:\n #print(\"Player two wins\")\n playerTwoHand.append(playerTwoCard)\n playerTwoHand.append(playerOneCard)\n\n index += 1\n\n #print(f\"playerOneHand: {playerOneHand}\")\n #print(f\"playerTwoHand: {playerTwoHand}\")\n\n #finalList = playerOneHand + playerTwoHand\n #mulList = list(range(len(finalList), 0, -1 ) )\n\n #print(f\"mulList: {mulList}\")\n #print(f\"finalList: {finalList}\")\n\n #print(\"---------------------------------------------------------------------------------------\")\n #print(\"End of game\")\n #time.sleep(1)\n\n return playerOneHand, playerTwoHand\n\n #return sum(map(mul, mulList, finalList))", "def check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def match(self, move):\n click1 = move[0]\n click2 = move[1]\n\n if click1['row'] == click2['row'] and click1['column'] == click2['column']:\n raise Exception(\"Corrupt move\") # \"Move is the same\"\n\n # Fetch the card id from playfield for the two different squares. 
If the id number in the two squares are the same\n # then we have a matching set of cards.\n id1 = self.get_card_id(click1)\n id2 = self.get_card_id(click2)\n move[0].update({'card': id1})\n move[1].update({'card': id2})\n return move, id1 == id2", "def same_player(self, other):\n return self.name == other.name \\\n and self.color == other.color", "def check_faced_players(self, pair: tuple[Player]) -> tuple or None:\n player_1 = pair[0]\n player_2 = pair[1]\n\n if player_1.family_name in player_2.faced_players:\n return None\n else:\n return pair", "def reportMatch(p1, p2, winner=-1):\n if p1 > p2:\n p1, p2 = p2, p1\n conn, cur = connect()\n query = \"SELECT report_match(%s, %s, %s)\"\n param = (p1, p2, winner,)\n cur.execute(query, param)\n conn.commit()\n conn.close()", "def play_game(deck1, deck2):\n seen = set()\n dq1 = collections.deque(deck1)\n dq2 = collections.deque(deck2)\n while dq1 and dq2:\n c1 = dq1.popleft()\n c2 = dq2.popleft()\n if c1 <= len(dq1) and c2 <= len(dq2):\n # Recursive game\n winner, _, _ = play_game(list(dq1)[:c1], list(dq2)[:c2])\n else:\n winner = 1 if c1 > c2 else 2\n\n if winner == 1:\n dq1.extend([c1, c2])\n else:\n dq2.extend([c2, c1])\n\n fp = (tuple(dq1), tuple(dq2))\n if fp in seen:\n return 1, dq1, dq2\n seen.add(fp)\n\n return (1 if dq1 else 2), dq1, dq2", "def check_same_tournaments_points(self, players_list: list[Player]) -> bool:\n general_ranking_list = []\n for player in players_list:\n general_ranking_list.append(player.classement)\n if len(general_ranking_list) == len(set(general_ranking_list)):\n return False\n else:\n return True", "def create_pair(self, players_list: list[Player], id_number, already_paired=[]) -> tuple:\n for player_1, player_2 in zip(repeat(players_list[id_number]), players_list[1:]):\n tuple = (player_1, player_2)\n pair = self.check_faced_players(tuple)\n if pair is None:\n pass\n else:\n if pair[0] in already_paired:\n pass\n elif pair[1] in already_paired:\n pass\n elif pair[0] == pair[1]:\n pass\n else:\n return pair", "def reportMatch(winner, loser):\n \n if not winner or not loser:\n print \"one or no players specified for report match\"\n else:\n query = \"INSERT INTO matches \\\n (playeroneid, losingplayerid) \\\n VALUES (%s,%s)\"\n values = (winner, loser)\n results = executeQuery({\n 'dbname': 'tournament', \n 'query' : query, \n 'type' : 'insert', \n 'values' : values\n })", "def isPlayerInGame(self, playerName):\n for team, players in self.players.items():\n for player in players:\n if playerName == player.name:\n return True, team\n return False, None", "def test_equal(self):\r\n\r\n a_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n a_x_dist = 3\r\n a_y_dist = 3\r\n a_num_to_win = 1\r\n a_game = Game(a_players, a_x_dist, a_y_dist, a_num_to_win)\r\n\r\n b_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n b_x_dist = 3\r\n b_y_dist = 3\r\n b_num_to_win = 1\r\n b_game = Game(b_players, b_x_dist, b_y_dist, b_num_to_win)\r\n\r\n c_players = [ZeroPlayer(1), ZeroPlayer(2)]\r\n c_x_dist = 3\r\n c_y_dist = 3\r\n c_num_to_win = 1\r\n c_game = Game(c_players, c_x_dist, c_y_dist, c_num_to_win)\r\n\r\n self.assertTrue(b_game == a_game == c_game)\r\n\r\n a_game.play_game()\r\n b_game.play_game()\r\n\r\n self.assertTrue(a_game == b_game)\r\n self.assertFalse(c_game == a_game)\r\n\r\n c_game.play_game()\r\n\r\n self.assertTrue(b_game == a_game == c_game)", "def reportMatch(winner, loser):\n c.execute(\"INSERT INTO matchup(winner,loser) VALUES (?,?)\", (winner,loser,));\n c.execute(\"UPDATE players SET wins = wins + 1, matches = 
matches + 1 WHERE id = ?\", (winner,));\n c.execute(\"UPDATE players SET matches=matches+1 WHERE id =?\", (loser,));\n print \"Player #id %d was the winner and player #id %d was the loser\" % (winner,loser)\n return", "def reportMatch(winner, loser):\n DB = connect()\n c = DB.cursor()\n c.execute(\"SELECT id FROM matches WHERE pID1 = %s and pID2= %s or pID2 = %s and pID1= %s\", (winner,loser,))\n result= c.fetchone()\n c.execute(\"INSERT INTO wins VALUES(%s,%s,%s)\",(winner,loser,result[0],))\n DB.commit()\n DB.close()", "def checkForRematch(player_id, opponent_id):\n conn, cur = connect()\n query = \"SELECT check_for_rematch(%s, %s);\"\n params = (player_id, opponent_id,)\n try:\n cur.execute(query, params)\n except:\n print(\"Error encountered when checking if playerId:\" + str(player_id) +\n \" has already played opponentId:\" + str(opponent_id))\n isRematch = cur.fetchone()\n conn.close()\n return int(isRematch[0])", "def swissPairings():\n #get all players, sort by number of wins.create matches with the 2 adjacent players\n c.execute(\"\"\"SELECT id, playerName, num_wins\n FROM (SELECT winner_id, count(match_id) as num_wins \n FROM wins \n group by winner_id \n )as R1 right join tournament on R1.winner_id= tournament.id order by num_wins\"\"\")\n result= c.fetchall()\n for row in result:\n print row", "def swissPairings():\n # LOGIC used in pairing :\n # Latest standings are extracted using \"players\" table.\n # From the standings, 2 players sets/tuples are chosen wherein the players have similar \"wins\".(Adjacent)\n #\n cur4 = conn.cursor()\n query = \"\"\"SELECT id, name, sum(wincount) as wins, sum(lose_count)+sum(wincount) as total\n from\n (((\n select p.id, p.name, count(winner) as wincount, '0' as lose_count\n from players p left join matches on p.id=winner group by p.id, p.name order by count(winner) desc)\n UNION\n (select p.id, p.name, '0' as wincount, count(loser) as lose_count\n from players p left join matches on p.id=loser group by p.id, p.name order by count(loser) desc\n )))\n as standings group by id, name order by wins desc, total asc;\"\"\"\n cur4.execute(query)\n rows = cur4.fetchall()\n\n # Below are the temporary variables used in processing.\n count = 1\n temp_pid = ()\n temp_name = ()\n pid = ()\n name = ()\n\n # For executing the test cases successfully, the returned datastructure\n # should be a list of tuples.\n outer_list = []\n inner_tuple = ()\n\n # Instantiating and returning the datastructure.\n for row in rows:\n # The function needs to send pid,name hence extracting them.\n pid = (row[0],)\n name = (row[1],)\n if count in {1, 3, 5, 7}:\n temp_pid = pid\n temp_name = name\n else:\n inner_tuple = temp_pid+temp_name+pid+name\n outer_list.append(inner_tuple)\n count = count+1\n return outer_list", "def activity_pair_matches(self, trace, activity1, activity2) -> bool:\n raise NotImplementedError", "def activity_pair_matches(self, trace, activity1, activity2) -> bool:\n return self.subtrace_count(trace, [activity1, activity2]) > 0", "def is_winner(self, player: str) -> bool:\n total_result = self.current_state.hori_result + self.current_state.left_result + self.current_state.right_result\n total_line = len(total_result)\n p1_taken = 0\n p2_taken = 0\n for item in total_result:\n if item == '1':\n p1_taken+=1\n elif item == '2':\n p2_taken += 1\n if player == \"p1\":\n return float(p1_taken) >= total_line/2\n return float(p2_taken) >= total_line/2", "def all_players_finish(self):\n return len(self.game_winners) == len(self.players)", "def win(self, 
player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def swissPairings():\n\n match_tup = ()\n matches_list = []\n player_count = 0 # keeps track of how many players per match\n players = playerStandings();\n for player in players:\n if player_count == 0:\n playerone = player\n player_count += 1\n elif player_count == 1:\n playertwo = player\n player_count += 1\n if player_count == 2: # match full, add match to list then reset\n match_tup = (playerone[0],playerone[1],playertwo[0],playertwo[1])\n matches_list.append(match_tup)\n player_count = 0\n return matches_list", "def _check_winning_combinations(board, player):\n winning_combinations = (\n ((0, 0), (0, 1), (0, 2)),\n ((1, 0), (1, 1), (1, 2)),\n ((2, 0), (2, 1), (2, 2)),\n ((0, 0), (1, 0), (2, 0)),\n ((0, 1), (1, 1), (2, 1)),\n ((0, 2), (1, 2), (2, 2)),\n ((0, 0), (1, 1), (2, 2)),\n ((0, 2), (1, 1), (2, 0))\n )\n\n if any(combination for combination in winning_combinations if _is_winning_combination(board, combination, player)):\n return player\n\n return None" ]
[ "0.6778872", "0.6490606", "0.62809837", "0.6271337", "0.627046", "0.62453085", "0.61801904", "0.61615", "0.61376363", "0.60704356", "0.604136", "0.5922924", "0.589727", "0.5894811", "0.5865621", "0.5854355", "0.5851207", "0.5843786", "0.5834625", "0.5811289", "0.58053577", "0.579913", "0.5795443", "0.5757632", "0.57467645", "0.57406217", "0.574025", "0.5737922", "0.57279795", "0.57000905" ]
0.73695046
0
The test checks whether the virtual operation "hardfork_hive_operation" (generated on hardfork 23) contains correct data related to "air drop" HIVE.
def test_reset_data_provided_by_hardfork_hive_operation_generated_between_hf_22_and_hf_23(node: tt.InitNode): wallet = tt.Wallet(attach_to=node) wallet.create_account("goku1", hives=tt.Asset.Test(50) , hbds=tt.Asset.Tbd(50), vests=tt.Asset.Test(50)) wallet.create_account("steem", hives=tt.Asset.Test(100) , hbds=tt.Asset.Tbd(100) , vests=tt.Asset.Test(100)) with wallet.in_single_transaction(): wallet.api.delegate_vesting_shares("goku1", "steem", tt.Asset.Vest(5)) wallet.api.delegate_vesting_shares("steem", "goku1", tt.Asset.Vest(10)) assert get_hardfork_version(node) == "0.22.0" assert_account_resources(node, "goku1", operator.gt) assert_account_resources(node, "steem", operator.gt) __wait_for_hardfork_23_application(node) assert get_hardfork_version(node) == "0.23.0" assert_account_resources(node, "goku1", operator.eq) assert_account_resources(node, "steem", operator.eq) assert_cleared_resources_in_hardfork_hive_operation(node, "goku1") assert_cleared_resources_in_hardfork_hive_operation(node, "steem")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_hms_service_dies(self):\n # Force the tables to be uncached and then kill the hive metastore.\n tbl_name = \"functional.alltypes\"\n self.client.execute(\"invalidate metadata %s\" % tbl_name)\n kill_cmd = os.path.join(os.environ['IMPALA_HOME'], 'testdata/bin/kill-hive-server.sh')\n check_call([kill_cmd], close_fds=True)\n\n try:\n self.client.execute(\"describe %s\" % tbl_name)\n except ImpalaBeeswaxException as e:\n print(str(e))\n assert \"Failed to load metadata for table: %s. Running 'invalidate metadata %s' \"\\\n \"may resolve this problem.\" % (tbl_name, tbl_name) in str(e)\n self.run_hive_server()\n\n self.client.execute(\"invalidate metadata %s\" % tbl_name)\n self.client.execute(\"describe %s\" % tbl_name)", "def test_ha_vms(self):\n vm_host = ll_vms.get_vm_host(vm_name=conf.VM_NAME[0])\n host_resource = rhevm_helpers.get_host_resource_by_name(\n host_name=vm_host\n )\n for vm_name in conf.VM_NAME[:2]:\n testflow.step(\n \"Kill QEMU process of VM %s on host %s\", vm_name, vm_host\n )\n assert ll_hosts.kill_vm_process(\n resource=host_resource, vm_name=vm_name\n )\n\n testflow.step(\n \"Wait until both HA VM's %s will change state to %s\",\n conf.VM_NAME[:2], conf.VM_POWERING_UP\n )\n assert ll_vms.waitForVmsStates(\n positive=True, names=conf.VM_NAME[:2], states=conf.VM_POWERING_UP\n )\n\n testflow.step(\n \"Check that both HA VM's started on the same host\"\n )\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) ==\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_fecth_inventory_and_error():\n # build\n for key in divHretention.database_inv_sig:\n # ensuring an empty database\n del divHretention.database_inv_sig[key]\n\n # test\n test_time = 1e3\n start_time = time.time()\n inv, sig = divHretention.fetch_inventory_and_error(test_time)\n long_time = time.time() - start_time\n\n start_time = time.time()\n inv, sig = divHretention.fetch_inventory_and_error(test_time)\n short_time = time.time() - start_time\n\n assert test_time in divHretention.database_inv_sig\n assert short_time < long_time", "def test_restart_heketi_pod(self):\n\n # create heketi volume\n vol_info = heketi_volume_create(self.heketi_client_node,\n self.heketi_server_url,\n size=1, json=True)\n self.assertTrue(vol_info, \"Failed to create heketi volume of size 1\")\n self.addCleanup(\n heketi_volume_delete, self.heketi_client_node,\n self.heketi_server_url, vol_info['id'], raise_on_error=False)\n topo_info = heketi_topology_info(self.heketi_client_node,\n self.heketi_server_url,\n json=True)\n\n # get heketi-pod name\n heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],\n self.heketi_dc_name)\n\n # delete heketi-pod (it restarts the pod)\n oc_delete(self.ocp_master_node[0], 'pod', heketi_pod_name)\n wait_for_resource_absence(self.ocp_master_node[0],\n 'pod', heketi_pod_name)\n\n # get new heketi-pod name\n heketi_pod_name = get_pod_name_from_dc(self.ocp_master_node[0],\n self.heketi_dc_name)\n wait_for_pod_be_ready(self.ocp_master_node[0],\n heketi_pod_name)\n\n # check heketi server is running\n self.assertTrue(\n hello_heketi(self.heketi_client_node, self.heketi_server_url),\n \"Heketi server %s is not alive\" % self.heketi_server_url\n )\n\n # compare the topology\n new_topo_info = heketi_topology_info(self.heketi_client_node,\n self.heketi_server_url,\n json=True)\n self.assertEqual(new_topo_info, topo_info, \"topology info is not same,\"\n \" difference - %s\" % diff(topo_info, new_topo_info))\n\n # create new volume\n vol_info = 
heketi_volume_create(self.heketi_client_node,\n self.heketi_server_url,\n size=2, json=True)\n self.assertTrue(vol_info, \"Failed to create heketi volume of size 20\")\n heketi_volume_delete(\n self.heketi_client_node, self.heketi_server_url, vol_info['id'])", "def test_shd_should_not_crash_executed_heal_info(self):\n # pylint: disable=too-many-statements\n bricks_list = get_all_bricks(self.mnode, self.volname)\n # Setting options\n g.log.info('Setting options...')\n options = {\"metadata-self-heal\": \"off\",\n \"entry-self-heal\": \"off\",\n \"data-self-heal\": \"off\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files -f 10 --fixed-file-size 1M %s\"\n % (self.script_upload_path, mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Bring brick0 offline\n g.log.info('Bringing bricks %s offline', bricks_list[0])\n ret = bring_bricks_offline(self.volname, bricks_list[0])\n self.assertTrue(ret, 'Failed to bring bricks %s offline'\n % bricks_list[0])\n\n ret = are_bricks_offline(self.mnode, self.volname,\n [bricks_list[0]])\n self.assertTrue(ret, 'Bricks %s are not offline'\n % bricks_list[0])\n g.log.info('Bringing bricks %s offline is successful',\n bricks_list[0])\n\n # Creating files on client side\n number_of_files_one_brick_off = '1000'\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files \"\n \"-f %s \"\n \"--fixed-file-size 1k \"\n \"--base-file-name new_file \"\n \"%s\"\n % (self.script_upload_path,\n number_of_files_one_brick_off,\n mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Get heal info\n g.log.info(\"Getting heal info...\")\n heal_info_data = get_heal_info_summary(self.mnode, self.volname)\n self.assertIsNotNone(heal_info_data, 'Failed to get heal info.')\n g.log.info('Success in getting heal info')\n\n # Check quantity of file pending heal\n for brick in bricks_list[1:]:\n self.assertEqual(heal_info_data[brick]['numberOfEntries'],\n str(int(number_of_files_one_brick_off)+1),\n 'Number of files pending heal is not correct')\n\n # Setting options\n g.log.info('Setting options...')\n options = {\"performance.enable-least-priority\": \"enable\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Bring brick1 offline\n g.log.info('Bringing 
bricks %s offline', bricks_list[1])\n ret = bring_bricks_offline(self.volname, bricks_list[1])\n self.assertTrue(ret, 'Failed to bring bricks %s offline'\n % bricks_list[1])\n\n ret = are_bricks_offline(self.mnode, self.volname,\n [bricks_list[1]])\n self.assertTrue(ret, 'Bricks %s are not offline'\n % bricks_list[1])\n g.log.info('Bringing bricks %s offline is successful',\n bricks_list[1])\n\n # Setting options\n g.log.info('Setting options...')\n options = {\"quorum-type\": \"fixed\"}\n ret = set_volume_options(self.mnode, self.volname, options)\n self.assertTrue(ret, 'Failed to set options %s' % options)\n g.log.info(\"Successfully set %s for volume %s\",\n options, self.volname)\n\n # Creating files on client side\n number_of_files_two_brick_off = '100'\n self.all_mounts_procs = []\n for mount_obj in self.mounts:\n g.log.info(\"Generating data for %s:%s\",\n mount_obj.client_system, mount_obj.mountpoint)\n # Create files\n g.log.info('Creating files...')\n command = (\"python %s create_files \"\n \"-f %s \"\n \"--fixed-file-size 1k \"\n \"--base-file-name new_new_file \"\n \"%s\"\n % (self.script_upload_path,\n number_of_files_two_brick_off,\n mount_obj.mountpoint))\n\n proc = g.run_async(mount_obj.client_system, command,\n user=mount_obj.user)\n self.all_mounts_procs.append(proc)\n self.io_validation_complete = False\n\n # Validate IO\n self.assertTrue(\n validate_io_procs(self.all_mounts_procs, self.mounts),\n \"IO failed on some of the clients\"\n )\n self.io_validation_complete = True\n\n # Get heal info\n g.log.info(\"Getting heal info...\")\n heal_info_data = get_heal_info_summary(self.mnode, self.volname)\n self.assertIsNotNone(heal_info_data, 'Failed to get heal info.')\n g.log.info('Success in getting heal info')\n\n # Check quantity of file pending heal\n number_of_files_to_check = str(int(number_of_files_one_brick_off) +\n int(number_of_files_two_brick_off) + 1)\n self.assertEqual(heal_info_data[bricks_list[-1]]['numberOfEntries'],\n number_of_files_to_check,\n 'Number of files pending heal is not correct')", "def test_repair_hive_table(self, mock_logging):\n query_result = [{'Status': 'SUCCEEDED'}]\n self.client.athena_client = MockAthenaClient(results=query_result)\n\n self.client.repair_hive_table({'unit-testing.streamalerts'})\n assert_true(mock_logging.info.called)", "def test_ha_vm(self):\n testflow.step(\n \"Add VM %s to affinity group %s\",\n conf.VM_NAME[2], self.affinity_group_name\n )\n assert ll_clusters.update_affinity_group(\n cluster_name=conf.CLUSTER_NAME[0],\n old_name=self.affinity_group_name,\n vms=conf.VM_NAME[:3],\n positive=False\n )\n ha_host = ll_vms.get_vm_host(vm_name=conf.VM_NAME[2])\n host_resource = rhevm_helpers.get_host_resource_by_name(\n host_name=ha_host\n )\n testflow.step(\"Kill HA VM %s\", conf.VM_NAME[2])\n assert ll_hosts.kill_vm_process(\n resource=host_resource, vm_name=conf.VM_NAME[2]\n )\n testflow.step(\"Wait for HA VM %s to be down\", conf.VM_NAME[2])\n assert ll_vms.waitForVMState(vm=conf.VM_NAME[2], state=conf.VM_DOWN)\n testflow.step(\n \"Check that HA VM %s fails to start\", conf.VM_NAME[2]\n )\n assert not ll_vms.waitForVMState(vm=conf.VM_NAME[2], timeout=120)\n testflow.step(\"Stop VM %s\", conf.VM_NAME[1])\n assert ll_vms.stopVm(positive=True, vm=conf.VM_NAME[1])\n testflow.step(\n \"Check that HA VM %s succeeds to start\", conf.VM_NAME[2]\n )\n assert ll_vms.waitForVMState(\n vm=conf.VM_NAME[2], state=conf.VM_POWERING_UP\n )", "def test_delete_hyperflex_hxdp_version(self):\n pass", "def 
test_execute_host_maintenance(self):\n self.addCleanup(self.rollback_compute_nodes_status)\n instances = self._create_one_instance_per_host_with_statistic()\n hostname = instances[0].get('OS-EXT-SRV-ATTR:hypervisor_hostname')\n audit_parameters = {\"maintenance_node\": hostname}\n\n _, goal = self.client.show_goal(self.GOAL)\n _, strategy = self.client.show_strategy(\"host_maintenance\")\n _, audit_template = self.create_audit_template(\n goal['uuid'], strategy=strategy['uuid'])\n\n self.assertTrue(test_utils.call_until_true(\n func=functools.partial(\n self.has_action_plans_finished),\n duration=600,\n sleep_for=2\n ))\n\n _, audit = self.create_audit(\n audit_template['uuid'], parameters=audit_parameters)\n\n try:\n self.assertTrue(test_utils.call_until_true(\n func=functools.partial(\n self.has_audit_finished, audit['uuid']),\n duration=600,\n sleep_for=2\n ))\n except ValueError:\n self.fail(\"The audit has failed!\")\n\n _, finished_audit = self.client.show_audit(audit['uuid'])\n if finished_audit.get('state') in ('FAILED', 'CANCELLED'):\n self.fail(\"The audit ended in unexpected state: %s!\" %\n finished_audit.get('state'))\n\n _, action_plans = self.client.list_action_plans(\n audit_uuid=audit['uuid'])\n action_plan = action_plans['action_plans'][0]\n\n _, action_plan = self.client.show_action_plan(action_plan['uuid'])\n _, action_list = self.client.list_actions(\n action_plan_uuid=action_plan[\"uuid\"])\n\n if action_plan['state'] in ('SUPERSEDED', 'SUCCEEDED'):\n # This means the action plan is superseded so we cannot trigger it,\n # or it is empty.\n return\n for action in action_list['actions']:\n self.assertEqual('PENDING', action.get('state'))\n\n # Execute the action by changing its state to PENDING\n _, updated_ap = self.client.start_action_plan(action_plan['uuid'])\n\n self.assertTrue(test_utils.call_until_true(\n func=functools.partial(\n self.has_action_plan_finished, action_plan['uuid']),\n duration=600,\n sleep_for=2\n ))\n _, finished_ap = self.client.show_action_plan(action_plan['uuid'])\n _, action_list = self.client.list_actions(\n action_plan_uuid=finished_ap[\"uuid\"])\n self.assertIn(updated_ap['state'], ('PENDING', 'ONGOING'))\n self.assertIn(finished_ap['state'], ('SUCCEEDED', 'SUPERSEDED'))\n\n for action in action_list['actions']:\n self.assertEqual('SUCCEEDED', action.get('state'))", "def test_healthcheck_galera_cluster(host):\n\n sql_query = (\"show status where Variable_name like 'wsrep_clu%'\"\n \"or Variable_name like 'wsrep_local_state%';\")\n mysql_cmd = 'mysql -h localhost -e \"{0}\"'.format(sql_query)\n\n cmd = \"{} {}\".format(galera_container, mysql_cmd)\n\n output = host.run(cmd)\n verify_items = ['wsrep_cluster_conf_id',\n 'wsrep_cluster_size',\n 'wsrep_cluster_state_uuid',\n 'wsrep_cluster_status',\n 'wsrep_local_state_uuid']\n\n for item in verify_items:\n assert item in output.stdout", "def test_workload_get_command_human_readable(\n workload_get_success, workload_get_success_hr\n):\n hr_output = prepare_workload_get_output(workload_get_success)\n assert hr_output == workload_get_success_hr", "def test_other_iam_data_fixes_in_GH_393(self):\n # Cassandra: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonkeyspacesforapachecassandra.html\n results = get_actions_for_service(\"cassandra\")\n self.assertTrue(\"cassandra:Restore\" in results)\n # Comprehend Medical: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncomprehendmedical.html\n results = 
get_actions_for_service(\"comprehendmedical\")\n # print(results)\n actions = [\n \"comprehendmedical:DescribeEntitiesDetectionV2Job\",\n \"comprehendmedical:DescribeICD10CMInferenceJob\",\n \"comprehendmedical:DescribePHIDetectionJob\",\n \"comprehendmedical:DescribeRxNormInferenceJob\",\n # \"comprehendmedical:DescribeSNOMEDCTInferenceJob\", # Not in SAR\n \"comprehendmedical:DetectEntitiesV2\",\n \"comprehendmedical:InferICD10CM\",\n \"comprehendmedical:InferRxNorm\",\n # \"comprehendmedical:InferSNOMEDCT\", # Not in SAR\n \"comprehendmedical:ListEntitiesDetectionV2Jobs\",\n \"comprehendmedical:ListICD10CMInferenceJobs\",\n \"comprehendmedical:ListPHIDetectionJobs\",\n \"comprehendmedical:ListRxNormInferenceJobs\",\n # \"comprehendmedical:ListSNOMEDCTInferenceJobs\", # Not in SAR\n \"comprehendmedical:StartEntitiesDetectionV2Job\",\n \"comprehendmedical:StartICD10CMInferenceJob\",\n \"comprehendmedical:StartPHIDetectionJob\",\n \"comprehendmedical:StartRxNormInferenceJob\",\n \"comprehendmedical:StopEntitiesDetectionV2Job\",\n \"comprehendmedical:StopICD10CMInferenceJob\",\n ]\n for action in actions:\n # if action not in results:\n # print(action)\n self.assertTrue(action in results)\n # Compute Optimizer\n results = get_actions_for_service(\"compute-optimizer\")\n actions = [\n \"compute-optimizer:DeleteRecommendationPreferences\",\n \"compute-optimizer:ExportEBSVolumeRecommendations\",\n \"compute-optimizer:ExportLambdaFunctionRecommendations\",\n \"compute-optimizer:GetEffectiveRecommendationPreferences\",\n \"compute-optimizer:GetEnrollmentStatusesForOrganization\",\n \"compute-optimizer:GetLambdaFunctionRecommendations\",\n \"compute-optimizer:GetRecommendationPreferences\",\n \"compute-optimizer:PutRecommendationPreferences\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # DataSync\n results = get_actions_for_service(\"datasync\")\n actions = [\n \"datasync:UpdateLocationNfs\",\n \"datasync:UpdateLocationObjectStorage\",\n \"datasync:UpdateLocationSmb\",\n \"datasync:UpdateTaskExecution\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # Account Management\n results = get_actions_for_service(\"account\")\n actions = [\n \"account:DeleteAlternateContact\",\n \"account:GetAlternateContact\",\n \"account:PutAlternateContact\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS IAM Access Analyzer\n results = get_actions_for_service(\"access-analyzer\")\n actions = [\n \"access-analyzer:CancelPolicyGeneration\",\n \"access-analyzer:CreateAccessPreview\",\n \"access-analyzer:GetAccessPreview\",\n \"access-analyzer:GetGeneratedPolicy\",\n \"access-analyzer:ListAccessPreviewFindings\",\n \"access-analyzer:ListAccessPreviews\",\n \"access-analyzer:ListPolicyGenerations\",\n \"access-analyzer:StartPolicyGeneration\",\n \"access-analyzer:ValidatePolicy\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Elemental Activations\n results = get_actions_for_service(\"elemental-activations\")\n actions = [\n \"elemental-activations:CompleteAccountRegistration\",\n \"elemental-activations:StartAccountRegistration\"\n ]\n for action in actions:\n self.assertTrue(action in results)\n # OpenSearch\n results = get_actions_for_service(\"es\")\n actions = [\n \"es:DescribeDomainChangeProgress\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Location\n results = get_actions_for_service(\"geo\")\n actions = [\n \"geo:CalculateRouteMatrix\",\n ]\n for action in actions:\n 
self.assertTrue(action in results)\n\n # Amazon Managed Grafana\n results = get_actions_for_service(\"grafana\")\n actions = [\n \"grafana:DescribeWorkspaceAuthentication\",\n \"grafana:UpdateWorkspaceAuthentication\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # EC2 Image Builder\n results = get_actions_for_service(\"imagebuilder\")\n actions = [\n \"imagebuilder:ImportVmImage\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n # Timestream\n results = get_actions_for_service(\"timestream\")\n actions = [\n \"timestream:CreateScheduledQuery\",\n \"timestream:DeleteScheduledQuery\",\n \"timestream:DescribeScheduledQuery\",\n \"timestream:ExecuteScheduledQuery\",\n \"timestream:ListScheduledQueries\",\n \"timestream:UpdateScheduledQuery\",\n ]\n for action in actions:\n self.assertTrue(action in results)\n\n # AWS Transfer Family\n results = get_actions_for_service(\"transfer\")\n actions = [\n \"transfer:CreateAccess\",\n \"transfer:CreateWorkflow\",\n \"transfer:DeleteAccess\",\n \"transfer:DeleteWorkflow\",\n \"transfer:DescribeAccess\",\n \"transfer:DescribeExecution\",\n \"transfer:DescribeWorkflow\",\n \"transfer:ListAccesses\",\n \"transfer:ListExecutions\",\n \"transfer:ListWorkflows\",\n \"transfer:SendWorkflowStepState\",\n \"transfer:UpdateAccess\",\n ]\n for action in actions:\n self.assertTrue(action in results)", "def test_add_hive_partition(self, mock_logging):\n self.client.add_hive_partition(None)\n\n assert_true(mock_logging.error.called)", "def test_neg_opearte_with_incorrect_polic(self):\n key = (\"test\", \"demo\", 1)\n policy = {\"total_timeout\": 0.5}\n llist = [\n {\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": aerospike.OPERATOR_INCR, \"bin\": \"age\", \"val\": 3},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"name\"},\n ]\n\n try:\n self.as_connection.operate(key, llist, {}, policy)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def test_verify_vlan_with_lagg0_parent_interface_using_secondary_failover_group_functions(driver):", "def test_change_provisioned_throughput_usual_case():", "def test_HYD965(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume = synthetic_volume_full(host)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpAccepted(response)\n\n response = self.api_client.post(\n \"/api/target/\", data={\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume.id}\n )\n self.assertHttpBadRequest(response)", "def test_operation_no_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"Vac | 0\\n\")\n assert bb.operations == [{\"modes\": [0], \"op\": \"Vac\"}]", "def test_with_persistence_issues(self):\n\n if self.durability_level in [\n Bucket.DurabilityLevel.MAJORITY_AND_PERSIST_TO_ACTIVE,\n Bucket.DurabilityLevel.PERSIST_TO_MAJORITY]:\n self.log.critical(\"Test not valid for persistence durability\")\n return\n\n error_sim = dict()\n shell_conn = dict()\n cbstat_obj = dict()\n failover_info = dict()\n vb_info_info = dict()\n active_vbs_in_target_nodes = list()\n failover_info[\"init\"] = dict()\n failover_info[\"afterCrud\"] = dict()\n vb_info_info[\"init\"] = dict()\n vb_info_info[\"afterCrud\"] = dict()\n\n self.log.info(\"Selecting nodes to simulate error condition\")\n target_nodes = DurabilityHelper.getTargetNodes(self.cluster,\n 
self.nodes_init,\n self.num_nodes_affected)\n\n self.log.info(\"Simulate error condition on %s\" % target_nodes)\n for node in target_nodes:\n cbstat_obj[node.ip] = Cbstats(node)\n active_vbs_in_target_nodes += cbstat_obj[node.ip].vbucket_list(\n self.bucket.name,\n \"active\")\n vb_info_info[\"init\"][node.ip] = cbstat_obj[node.ip].vbucket_seqno(\n self.bucket.name)\n failover_info[\"init\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n if self.simulate_error \\\n in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n error_sim = DiskError(self.log, self.task_manager,\n self.cluster.master, target_nodes,\n 60, 0, False, 120,\n disk_location=\"/data\")\n error_sim.create(action=self.simulate_error)\n else:\n for node in target_nodes:\n # Create shell_connections\n shell_conn[node.ip] = RemoteMachineShellConnection(node)\n\n # Perform specified action\n error_sim[node.ip] = CouchbaseError(self.log,\n shell_conn[node.ip],\n node=node)\n error_sim[node.ip].create(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Perform CRUDs with induced error scenario is active\n load_spec = dict()\n load_spec[\"doc_crud\"] = dict()\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.CREATE_PERCENTAGE_PER_COLLECTION] = 100\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.UPDATE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.DELETE_PERCENTAGE_PER_COLLECTION] = 25\n load_spec[\"doc_crud\"][\n MetaCrudParams.DocCrud.COMMON_DOC_KEY] = \"test_collections\"\n\n self.log.info(\"Perform 'create', 'update', 'delete' mutations\")\n doc_loading_task = \\\n self.bucket_util.run_scenario_from_spec(\n self.task,\n self.cluster,\n self.cluster.buckets,\n load_spec,\n mutation_num=1,\n async_load=True)\n\n # Perform new scope/collection creation during doc ops in parallel\n self.__perform_collection_crud(mutation_num=2)\n\n # Wait for doc_loading to complete and validate the doc ops\n self.task_manager.get_task_result(doc_loading_task)\n self.bucket_util.validate_doc_loading_results(doc_loading_task)\n if doc_loading_task.result is False:\n self.log_failure(\"Doc CRUDs failed with persistence issue\")\n\n if self.simulate_error \\\n in [DiskError.DISK_FULL, DiskError.DISK_FAILURE]:\n error_sim.revert(self.simulate_error)\n else:\n # Revert the induced error condition\n for node in target_nodes:\n error_sim[node.ip].revert(self.simulate_error,\n bucket_name=self.bucket.name)\n\n # Disconnect the shell connection\n shell_conn[node.ip].disconnect()\n self.sleep(10, \"Wait for node recovery to complete\")\n\n # Doc count validation\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)\n\n # Fetch latest failover stats and validate the values are updated\n self.log.info(\"Validating failover and seqno cbstats\")\n for node in target_nodes:\n vb_info_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].vbucket_seqno(self.bucket.name)\n failover_info[\"afterCrud\"][node.ip] = \\\n cbstat_obj[node.ip].failover_stats(self.bucket.name)\n\n # Failover validation\n val = \\\n failover_info[\"init\"][node.ip] \\\n == failover_info[\"afterCrud\"][node.ip]\n error_msg = \"Failover stats got updated\"\n self.assertTrue(val, msg=error_msg)\n\n # Seq_no validation (High level)\n val = \\\n vb_info_info[\"init\"][node.ip] \\\n != vb_info_info[\"afterCrud\"][node.ip]\n self.assertTrue(val, msg=\"vbucket seq_no not updated after CRUDs\")\n\n 
self.validate_test_failure()\n\n # Doc count validation\n self.bucket_util._wait_for_stats_all_buckets(self.cluster,\n self.cluster.buckets)\n self.bucket_util.validate_docs_per_collections_all_buckets(\n self.cluster)", "def _executesqltest(self, operation):\n # Use handle to query\n return self.session.execute_sql_test(self._statement_cache.get_statement(), operation)", "def test_dumpling_with_missing_chef(self, packet_dumpling_dict):\n del packet_dumpling_dict['metadata']['chef']\n\n with pytest.raises(InvalidDumpling):\n validate_dumpling(json.dumps(packet_dumpling_dict))", "def test_calculate_supervisory_delta_put(self):\n SDP = calculate_supervisory_delta_put()\n \n self.assertEqual(SDP, -0.27)", "def test_partition_tables_no_partition(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_el_var = \"${record:attribute('sdc.dataset.name')}\"\n table_el_var = \"${record:attribute('sdc.table.name')}\"\n records_count = 20\n\n # Build the pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev data generator\n dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')\n dev_data_generator.set_attributes(batch_size=10,\n records_to_be_generated=records_count,\n fields_to_generate=[\n {\"type\": \"POKEMON\", \"field\": \"name\"}\n ])\n\n # Build Expression Evaluator\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.set_attributes(header_attribute_expressions=[\n {'attributeToSet': 'sdc.dataset.name',\n 'headerAttributeExpression': dataset_name},\n {'attributeToSet': 'sdc.table.name',\n 'headerAttributeExpression': table_name}]\n )\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_el_var,\n table=table_el_var,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n partition_table=True,\n partition_configuration=[\n {\"dataset\": \"wrong_dataset\",\n \"table\": \"wrong_table\",\n \"partitionType\": \"INGESTION\",\n \"timePartitionType\": \"MONTH\",\n \"timePartitionExpiration\": 0}\n ])\n\n dev_data_generator >> expression_evaluator >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n # Assert table is not partitioned\n assert not table.time_partitioning\n # And that we have records in the table\n assert len(data_from_bigquery) == records_count\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def test_fresh_vasp_fw_run(self):\n output_path = os.path.join(os.getcwd(), 'output')\n Path(output_path).mkdir(exist_ok=True)\n db_path = os.path.join(output_path, 
'test.db')\n os.chdir(output_path) # change directory to contain vasp output\n db = connect('test.db')\n initial_index = db.write(molecule('H2O'))\n calc_spec = {\"is_zeolite\": True}\n spec = {\"database_path\": db_path,\n \"input_id\": initial_index,\n \"calculation_type\": \"dry_run\",\n \"calc_spec\": calc_spec,\n \"structure_type\": \"zeo\"}\n\n test_ft = vasp_db.VASPDB()\n test_ft.set_env_vars = MagicMock()\n # VASP cannot be run locally and thus we have to mock the calc_energy\n # method in VASP\n do_nothing_mock = Mock()\n do_nothing_mock.side_effect = self.generate_fake_output\n test_ft.do_nothing = do_nothing_mock\n output_fw = test_ft.run_task(spec)\n output_index = output_fw.stored_data['output_index']\n\n with self.subTest('assert correct folder name created'):\n self.assertTrue('vasp_' + str(spec['calculation_type']) + '_' + str(initial_index))\n\n with self.subTest('assert files copied over'):\n original_dir = os.listdir('/Users/dda/Desktop/fish/fireworks/vasp_fw_tests/data/fake_vasp_output/00_opt')\n new_dir = os.listdir('vasp_' + str(spec['calculation_type']) + '_' + str(initial_index))\n self.assertCountEqual(original_dir, new_dir)\n\n with self.subTest('assert new atoms object added'):\n original_atoms = db.get_atoms(initial_index)\n added_atoms = db.get_atoms(output_index)\n for a1, a2 in zip(original_atoms, added_atoms):\n self.assertNotEqual(a1.symbol, a2.symbol)\n self.assertEqual(a2.symbol, 'Po')\n for p1, p2 in zip(a1.position, a2.position):\n self.assertEqual(p1, p2)", "def test_start(self):\n keys = ('symbol', 'date', 'dte', 'strike', 'dte_iv', 'strike_iv', 'impl_vol')\n symbols = ('AIG', 'C', 'DDD', 'DIS', 'FSLR', 'JPM')\n for symbol in symbols:\n print 'symbol: %s' % symbol.upper()\n calc = DayIVCalc(symbol)\n calc.start()\n\n db = pd.HDFStore(os.path.join(QUOTE_DIR, '%s.h5' % symbol.lower()))\n df_iv = db.select('/option/day_iv')\n db.close()\n\n self.assertTrue(len(df_iv))\n for key in keys:\n self.assertIn(key, df_iv.columns)", "def test_neg_opearte_on_same_bin(self):\n key = (\"test\", \"demo\", 1)\n policy = {}\n llist = [\n {\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"},\n {\"op\": aerospike.OPERATOR_APPEND, \"bin\": \"name\", \"val\": \"aa\"},\n {\"op\": aerospike.OPERATOR_INCR, \"bin\": \"age\", \"val\": 3},\n {\"op\": aerospike.OPERATOR_READ, \"bin\": \"name\"},\n ]\n\n try:\n self.as_connection.operate(key, llist, {}, policy)\n\n except e.InvalidRequest as exception:\n assert exception.code == 4", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_going_down(self):", "def test_hup(self):\n process_tree = self.get_process_tree()\n initial_zygote = self.get_zygote(process_tree)\n os.kill(self.proc.pid, signal.SIGHUP)\n time.sleep(1)\n\n process_tree = self.get_process_tree()\n final_zygote = self.get_zygote(process_tree)\n assert_not_equal(initial_zygote, final_zygote)", "def testHealthAssessStoolSoft(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"stool_soft\")\n\n self.util.intPropertyTest(self, attr, \"stool_soft\")", "def test_create_hyperflex_hxdp_version(self):\n pass" ]
[ "0.589975", "0.5518395", "0.545446", "0.5445042", "0.5427462", "0.53870016", "0.5262899", "0.52119577", "0.5174843", "0.51490927", "0.51376146", "0.5104962", "0.5064738", "0.5062418", "0.5041236", "0.5039861", "0.50327873", "0.5029942", "0.4999761", "0.4995236", "0.499059", "0.4989332", "0.49752155", "0.4927598", "0.492695", "0.49151185", "0.49121088", "0.49089715", "0.49053422", "0.4891149" ]
0.6844616
0
Given a filter name, import and return the filter class. By default, filter modules are searched within the ``ufo2ft.filters`` package.
def getFilterClass(filterName, pkg="ufo2ft.filters"): # TODO add support for third-party plugin discovery? # if filter name is 'Foo Bar', the module should be called 'fooBar' filterName = filterName.replace(" ", "") moduleName = filterName[0].lower() + filterName[1:] module = importlib.import_module(".".join([pkg, moduleName])) # if filter name is 'Foo Bar', the class should be called 'FooBarFilter' className = filterName[0].upper() + filterName[1:] if not className.endswith("Filter"): className += "Filter" return getattr(module, className)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadFilterFromString(spec):\n return _loadPluginFromString(spec, \"ufo2ft.filters\", isValidFilter)", "def get_filter(name):\n try:\n return FILTERS[name.upper()]\n except:\n msg = 'Unknown model of filter {}, options are {}'\n raise ValueError(msg.format(name, list(FILTERS.keys())))", "def standard_filters():\n classes = []\n filters_dir = __path__[0]\n for dirpath, dirnames, filenames in os.walk(filters_dir):\n relpath = os.path.relpath(dirpath, filters_dir)\n if relpath == '.':\n relpkg = ''\n else:\n relpkg = '.%s' % '.'.join(relpath.split(os.sep))\n for fname in filenames:\n root, ext = os.path.splitext(fname)\n if ext != '.py' or root == '__init__':\n continue\n module_name = \"%s%s.%s\" % (__package__, relpkg, root)\n mod_classes = _get_filter_classes_from_module(module_name)\n classes.extend(mod_classes)\n return classes", "def _get_filter_classes_from_module(module_name):\n classes = []\n module = utils.import_object(module_name)\n for obj_name in dir(module):\n itm = getattr(module, obj_name)\n if _is_filter_class(itm):\n classes.append(itm)\n return classes", "def get_filter_classes(filter_class_names):\n classes = []\n for cls_name in filter_class_names:\n obj = utils.import_class(cls_name)\n if _is_filter_class(obj):\n classes.append(obj)\n elif type(obj) is types.FunctionType:\n # Get list of classes from a function\n classes.extend(obj())\n else:\n raise exception.ClassNotFound(class_name=cls_name,\n exception='Not a valid scheduler filter')\n return classes", "def __init__(self, classname=None, jobject=None, options=None):\n if jobject is None:\n jobject = Filter.new_instance(classname)\n self.enforce_type(jobject, \"weka.filters.Filter\")\n super(Filter, self).__init__(jobject=jobject, options=options)", "def __init__(self, image, filter_name, cutoff, order = 0):\n self.filter_name = filter_name\n self.image = image\n if filter_name == 'ideal_l':\n self.filter = self.get_ideal_low_pass_filter\n elif filter_name == 'ideal_h':\n self.filter = self.get_ideal_high_pass_filter\n elif filter_name == 'butterworth_l':\n self.filter = self.get_butterworth_low_pass_filter\n elif filter_name == 'butterworth_h':\n self.filter = self.get_butterworth_high_pass_filter\n elif filter_name == 'gaussian_l':\n self.filter = self.get_gaussian_low_pass_filter\n elif filter_name == 'gaussian_h':\n self.filter = self.get_gaussian_high_pass_filter\n\n self.cutoff = cutoff\n self.order = order", "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n 
filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters", "def make_filter(name, schema):\n return HSMFilter(name, schema)", "def __init__(self, image, filter_name, cutoff, order = 0):\r\n self.image = image\r\n if filter_name == 'ideal_l':\r\n self.filter = self.get_ideal_low_pass_filter\r\n elif filter_name == 'ideal_h':\r\n self.filter = self.get_ideal_high_pass_filter\r\n elif filter_name == 'butterworth_l':\r\n self.filter = self.get_butterworth_low_pass_filter\r\n elif filter_name == 'butterworth_h':\r\n self.filter = self.get_butterworth_high_pass_filter\r\n elif filter_name == 'gaussian_l':\r\n self.filter = self.get_gaussian_low_pass_filter\r\n elif filter_name == 'gaussian_h':\r\n self.filter = self.get_gaussian_high_pass_filter\r\n\r\n self.cutoff = cutoff\r\n self.order = order\r\n self.filter_name = filter_name", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def __getitem__(self, name):\n with self as s:\n try:\n f = s._load_filter(name)\n except TypeError:\n f = [s._load_filter(k) for k in name]\n return f", "def get_filter_pillar(filter_name, pillar_key=\"acl\", pillarenv=None, saltenv=None):\n pillar_cfg = _get_pillar_cfg(pillar_key, pillarenv=pillarenv, saltenv=saltenv)\n return _lookup_element(pillar_cfg, filter_name)", "def make_filter_specification(cls, filter_string):\n try:\n return parse_filter(filter_string)\n except ParseException as err:\n raise ValueError('Expression parameters have errors. 
%s' % err)", "def filter(self, name=None):\n def wrapper(fn):\n if name is not None:\n _name = name\n else:\n _name = fn.__name__\n\n if _name in self._filters:\n raise Error(\"Filter already defined: {0}\".format(_name))\n\n self._filters[_name] = fn\n return fn\n return wrapper", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def get_exact_filter_by_name(self, name):\n for entry in self.filters:\n if (entry['type'] == 'filter' and entry['name'] == name and\n entry['comparator'] == 'equals'):\n return entry", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def _load_filter(self, fname, interp=True, lamb=None):\n ftab = self.hdf\n if hasattr(fname, 'decode'):\n fnode = ftab.get_node('/filters/' + fname.decode('utf8'))\n else:\n fnode = ftab.get_node('/filters/' + fname)\n flamb = fnode[:]['WAVELENGTH']\n transmit = fnode[:]['THROUGHPUT']\n dtype = 'photon'\n unit = None\n\n attrs = fnode.attrs\n if 'DETECTOR' in attrs:\n dtype = attrs['DETECTOR']\n if 'WAVELENGTH_UNIT' in attrs:\n unit = attrs['WAVELENGTH_UNIT']\n\n fil = UnitFilter(flamb, transmit, name=fnode.name,\n dtype=dtype, unit=unit)\n\n if interp & (lamb is not None):\n fil = fil.reinterp(lamb)\n return fil", "def initialize(module_name):\n \n global filter_function\n global debug\n \n # Get the level of debug\n debug = int(rule_manager.get_property(None, module_name, 'debug'))\n\n filter_function = process_filters.initialize_filter(module_name)\n\n return", "def add_filter(self, filter_):\n assert has_pil, _(\"Cannot add filters without python PIL\")\n self.cache.basename += filter_.basename\n self._filters.append(filter_)", "def construct_class_by_name(name, *args, **kwargs):\n parts = name.split('.')\n module_name, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_name)\n return getattr(module, class_name)(*args, **kwargs)", "def new(name):\r\n\r\n g_filter(name)", "def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def ext_filter(app):\n return UrlRewriteFilter(app, conf)\n return ext_filter", "def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def auth_filter(app):\n return DevAuth(app, conf)\n return auth_filter", "def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c" ]
[ "0.6654967", "0.63984025", "0.6100772", "0.595048", "0.5546851", "0.55364734", "0.55064267", "0.55002147", "0.54450935", "0.54266375", "0.54121363", "0.53471565", "0.5288375", "0.52491266", "0.52491266", "0.52491266", "0.52397", "0.52103484", "0.51934093", "0.5180745", "0.5180579", "0.51761055", "0.5174125", "0.5157936", "0.51343375", "0.50926125", "0.507982", "0.50551724", "0.50509435", "0.50501317" ]
0.8785171
0
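A short usage sketch for the getFilterClass record above (hypothetical call sites, not part of the dataset row; it assumes only the naming convention spelled out in that function, where a filter named "Decompose Components" maps to the decomposeComponents module and the DecomposeComponentsFilter class):

    # hypothetical usage of getFilterClass from the record above
    filterClass = getFilterClass("Decompose Components")
    # -> ufo2ft.filters.decomposeComponents.DecomposeComponentsFilter
    filterClass = getFilterClass("Propagate Anchors", pkg="ufo2ft.filters")
    # -> ufo2ft.filters.propagateAnchors.PropagateAnchorsFilter (explicit package namespace)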
Parse custom filters from the ufo's lib.plist. Return two lists, one for the filters that are applied before decomposition of composite glyphs, another for the filters that are applied after decomposition.
def loadFilters(ufo): preFilters, postFilters = [], [] for filterDict in ufo.lib.get(FILTERS_KEY, []): namespace = filterDict.get("namespace", "ufo2ft.filters") try: filterClass = getFilterClass(filterDict["name"], namespace) except (ImportError, AttributeError): from pprint import pformat logger.exception("Failed to load filter: %s", pformat(filterDict)) continue filterObj = filterClass( *filterDict.get("args", []), include=filterDict.get("include"), exclude=filterDict.get("exclude"), pre=filterDict.get("pre", False), **filterDict.get("kwargs", {}), ) if filterObj.pre: preFilters.append(filterObj) else: postFilters.append(filterObj) return preFilters, postFilters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_filters(filters_str):\n fltrs = []\n for part in str(filters_str).lower().split(\",\"):\n if part==\"blur\":\n fltrs.append(filters.blur(1))\n elif part==\"distort\":\n fltrs.append(filters.distort(18))\n\n return fltrs", "def parse_filter(value):\n\n if value.endswith('+pol'):\n pol = True\n value = value[:-4]\n else:\n pol = False\n\n if value in ufti_filters:\n return (ufti_filters[value], pol)\n\n else:\n logger.warning('Filter ' + value + ' is not recognised')\n return (None, pol)", "def GetFilterListsFromString(cls, parser_filter_string):\n includes = []\n excludes = []\n\n if not parser_filter_string:\n return includes, excludes\n\n preset_categories = presets.categories.keys()\n\n for filter_string in parser_filter_string.split(u','):\n filter_string = filter_string.strip()\n if not filter_string:\n continue\n\n if filter_string.startswith(u'-'):\n active_list = excludes\n filter_string = filter_string[1:]\n else:\n active_list = includes\n\n filter_string = filter_string.lower()\n if filter_string in cls._parser_classes:\n parser_class = cls._parser_classes[filter_string]\n active_list.append(filter_string)\n\n if parser_class.SupportsPlugins():\n active_list.extend(parser_class.GetPluginNames())\n\n elif filter_string in preset_categories:\n active_list.extend(\n presets.GetParsersFromCategory(filter_string))\n\n else:\n active_list.append(filter_string)\n\n return includes, excludes", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = [['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def get_filters(self):", "def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)", "def loadFilterFromString(spec):\n return _loadPluginFromString(spec, \"ufo2ft.filters\", isValidFilter)", "def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', 
just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]", "def filter(self, filters):", "def GetParserFilterListsFromString(cls, parser_filter_string):\n if not parser_filter_string:\n return [], []\n\n # Build the plugin to parser map, which cannot be a class member\n # otherwise the map will become invalid if a parser with plugins\n # is deregistered.\n plugin_to_parser_map = {}\n for parser_name, parser_class in cls._parser_classes.iteritems():\n if parser_class.SupportsPlugins():\n for plugin_name in parser_class.GetPluginNames():\n plugin_to_parser_map[plugin_name] = parser_name\n\n includes = set()\n excludes = set()\n\n preset_categories = presets.categories.keys()\n\n for filter_string in parser_filter_string.split(u','):\n filter_string = filter_string.strip()\n if not filter_string:\n continue\n\n if filter_string.startswith(u'-'):\n active_list = excludes\n filter_string = filter_string[1:]\n else:\n active_list = includes\n\n filter_string = filter_string.lower()\n if filter_string in cls._parser_classes:\n active_list.add(filter_string)\n\n elif filter_string in preset_categories:\n for entry in presets.GetParsersFromCategory(filter_string):\n active_list.add(plugin_to_parser_map.get(entry, entry))\n\n else:\n active_list.add(plugin_to_parser_map.get(\n filter_string, filter_string))\n\n return list(includes), list(excludes)", "def load_custom_filters(environment):\n\n # TODO deprecate ipaddr_index and netmask for the better ipnet ones\n filter_list = {\n 'dpkg_arch': filter_dpkg_arch,\n 'storage_size_num': filter_storage_size_num,\n 'ipnet_hostaddr': filter_ipnet_hostaddr,\n 'ipnet_hostmin': filter_ipnet_hostmin,\n 'ipnet_hostmax': filter_ipnet_hostmax,\n 'ipnet_broadcast': filter_ipnet_broadcast,\n 'ipnet_netmask': filter_ipnet_netmask,\n 'ipnet_contains_ip': filter_ipnet_contains_ip,\n 'ipnet_contains_iprange': filter_ipnet_contains_iprange,\n 'ipnet_range_size': filter_ipnet_range_size,\n 'ipaddr_index': filter_ipaddr_index,\n 'netmask': filter_netmask\n }\n\n for name, function in filter_list.items():\n environment.filters[name] = function", "def parse_for_filters(query_string):\n if ';' in query_string:\n strings = query_string.split(';')\n else:\n strings = query_string.split('&')\n\n filters = []\n leftovers = [] \n for string in strings:\n query = cgi.parse_qs(string)\n try:\n key, value = query.items()[0]\n\n try:\n argument = unicode(value[0], 'UTF-8')\n except TypeError:\n argument = value[0]\n\n func = FILTER_PARSERS[key](argument)\n filters.append(func)\n except(KeyError, IndexError):\n leftovers.append(string)\n\n leftovers = ';'.join(leftovers)\n return filters, leftovers", "def parse_filter(self, tokens):\n if not tokens:\n self._parser_state.error('missing filter')\n filters = []\n if tokens[0].type == 'LBRACE':\n opening_brace = tokens.pop(0)\n while tokens:\n filters.append(self._parse_single_filter(tokens))\n if not tokens or tokens[0].type != 'COMMA':\n break\n tokens.pop(0)\n if not tokens or tokens[0].type != 'RBRACE':\n self._parser_state.error('unclosed brace', token=opening_brace)\n tokens.pop(0)\n else:\n filters.append(self._parse_single_filter(tokens))\n return filters", "def filters(self):\n # easy enough\n return self.dcpl.getFilters()", "def _ParseFilterOptions(self, options):\n names = 
[u'date_filters', u'filter_file']\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=names)\n\n extensions_string = self.ParseStringOption(options, u'extensions_string')\n self._ParseExtensionsString(extensions_string)\n\n names_string = getattr(options, u'names_string', None)\n self._ParseNamesString(names_string)\n\n signature_identifiers = getattr(options, u'signature_identifiers', None)\n try:\n self._ParseSignatureIdentifiers(\n self._data_location, signature_identifiers)\n except (IOError, ValueError) as exception:\n raise errors.BadConfigOption(exception)\n\n if self._filter_file:\n self.has_filters = True\n else:\n self.has_filters = self._filter_collection.HasFilters()", "def initFilters(CONFIG):\n\t#### String containing all the preLibraries file name\n\tpreLibraries = findFile(\"classification_result/prelibraries/TE\", \"*.fasta\")\n\n\t#### String containing all the preLibraries file name\n\tnoCatLibrarie = findFile(\"classification_result/prelibraries/\", \"noCat.fasta\")\n\n\tlistPrelibraries = []\n\t#### dictionnaries that will contains all the id's sequences for concerned libraries\n\tdicoLibraries={\"autonomousLib\":[], \"totalTELib\":[], \"totalRepeatLib\":[]}\n\n\tlistPrelibraries.append(noCatLibrarie[0])\n\t#### Add all the name of prelibraries in listPrelibraries\n\tfor file in preLibraries:\n\t\tlistPrelibraries.append(file)\n\n\t#### Dictionnary that restain the final classification for a given sequence (helpfull for the intermediateLibraries)\n\tdicoFinalClassif={}\n\t#### Parse all the prelibrary\n\tprint(\"####\tApply the filters to create the intermediate libraries\")\n\tcreateIntermediateLibraries(listPrelibraries, dicoLibraries, CONFIG, dicoFinalClassif)\n\n\t#### List containing all the intermediate librarie file name\n\tintermediateLibraries = findFile(\"classification_result/intermediateLibraries\", \"*.fasta\")\n\n\tprint(\"####\tApply the cd-hit-est on the intermediate libraries\")\n\tapplyCDHIT(intermediateLibraries)\n\n\tretriveFinalLibrarieSequences(intermediateLibraries, CONFIG, dicoFinalClassif, dicoLibraries)\n\n\tprint(\"####\tCreation of the three final libraries\")\n\tcreateFinalLibraries(intermediateLibraries, dicoLibraries)\n\n\tprint(\"Number of sequences in autonomousTE : {nbAutonomous}\\nNumber of sequences in totalTE : {nbTotalTE}\\nNumber of sequences in totalRepeatLib : {nbRepeated}\".format(\\\n\tnbAutonomous=len(dicoLibraries[\"autonomousLib\"]), nbTotalTE=len(dicoLibraries[\"totalTELib\"]), nbRepeated=len(dicoLibraries[\"totalRepeatLib\"])))", "def __init__(self, config, *parse_list):\n super(ParsingFilter, self).__init__()\n self.config = config\n try:\n if (\n self.config[\"filter\"][\"whitelist\"]\n and self.config[\"filter\"][\"blacklist\"]\n ):\n _LOGGER.warning(\n _(\n \"Both whitelist and blacklist filters found in configuration. 
\"\n \"Only one can be used at a time - only the whitelist filter will be used.\"\n )\n )\n self.parse_list = [\n logging.Filter(name) for name in parse_list[0][\"whitelist\"]\n ]\n except KeyError:\n self.parse_list = parse_list[0].get(\"whitelist\") or parse_list[0].get(\n \"blacklist\"\n )\n\n self.parse_list = [logging.Filter(name) for name in self.parse_list]", "def extract_filter_list(self, filter_type, elements):\n titleLabel = QLabel(filter_type)\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n for element in elements:\n nextLabel = QLabel(element)\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter/3), counter % 3, alignment=Qt.AlignCenter)\n counter += 1", "def item_filters(self):\n\n # we use \"video\" since that's the mimetype category.\n return [\"photoshop.document\"]", "def get_filter_settings(options): \n \n if options.filternames != '-':\n filter_names = options.filternames.split(',')\n else:\n hdf_in = h5py.File(options.filters, 'r')\n filter_names = sorted(hdf_in.keys())\n hdf_in.close()\n\n if options.filtercombs != '-':\n filter_combs = []\n for fc in options.filtercombs.split(':'):\n filter_combs.append(fc.split(','))\n filter_combs[-1] = [int(x) for x in filter_combs[-1]]\n else:\n filter_combs = [[x] for x in range(len(filter_names))]\n\n if options.filtertypes == '-':\n filter_types = ['any'] * len(filter_names)\n else:\n ft = options.filtertypes.split(',')\n if len(ft) == 1:\n filter_types = [ft[0]] * len(filter_names)\n else:\n assert(len(ft) == len(filter_names))\n filter_types = ft\n \n return (filter_names, filter_combs, filter_types)", "def _parse_filter_list(filter: Optional[List]) -> Any:\n ret = None\n if filter:\n ret = set(filter)\n return ret", "def _filter(self, __button):\r\n# WARNING: Refactor _filter; current McCabe Complexity metric = 54.\r\n _criteria = []\r\n _inputs = []\r\n _compound = []\r\n\r\n # Read the user inputs for the different fields that can be used to\r\n # filter with.\r\n _criteria.append(self.cmbCriteriaID.get_active_text())\r\n _inputs.append(self.txtFilterID.get_text())\r\n _compound.append(self.cmbCompound1.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCategory.get_active_text())\r\n _inputs.append(self.cmbFilterCategory.get_active())\r\n _compound.append(self.cmbCompound2.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaType.get_active_text())\r\n _inputs.append(self.cmbFilterType.get_active())\r\n _compound.append(self.cmbCompound3.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaStatus.get_active_text())\r\n _inputs.append(self.cmbFilterStatus.get_active())\r\n _compound.append(self.cmbCompound4.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCriticality.get_active_text())\r\n _inputs.append(self.cmbFilterCriticality.get_active())\r\n _compound.append(self.cmbCompound5.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAge.get_active_text())\r\n _inputs.append(self.txtFilterAge.get_text())\r\n _compound.append(self.cmbCompound6.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLifeCycle.get_active_text())\r\n _inputs.append(self.cmbFilterLifeCycle.get_active())\r\n _compound.append(self.cmbCompound7.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaShortDesc.get_active_text())\r\n 
_inputs.append(self.txtFilterShortDesc.get_text())\r\n _compound.append(self.cmbCompound8.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaLongDesc.get_active_text())\r\n _inputs.append(self.txtFilterLongDesc.get_text())\r\n _compound.append(self.cmbCompound9.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRemarks.get_active_text())\r\n _inputs.append(self.txtFilterRemarks.get_text())\r\n _compound.append(self.cmbCompound10.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaAnalysis.get_active_text())\r\n _inputs.append(self.txtFilterAnalysis.get_text())\r\n _compound.append(self.cmbCompound11.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTest.get_active_text())\r\n _inputs.append(self.txtFilterTest.get_text())\r\n _compound.append(self.cmbCompound12.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaTestCase.get_active_text())\r\n _inputs.append(self.txtFilterTestCase.get_text())\r\n _compound.append(self.cmbCompound13.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestBy.get_active_text())\r\n _inputs.append(self.cmbFilterRequestBy.get_active_text())\r\n _compound.append(self.cmbCompound14.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaRequestDate.get_active_text())\r\n _inputs.append(self.txtFilterRequestDate.get_text())\r\n _compound.append(self.cmbCompound15.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewBy.get_active_text())\r\n _inputs.append(self.cmbFilterReviewBy.get_active_text())\r\n _compound.append(self.cmbCompound16.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaReviewDate.get_active_text())\r\n _inputs.append(self.txtFilterReviewDate.get_text())\r\n _compound.append(self.cmbCompound17.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveBy.get_active_text())\r\n _inputs.append(self.cmbFilterApproveBy.get_active_text())\r\n _compound.append(self.cmbCompound18.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaApproveDate.get_active_text())\r\n _inputs.append(self.txtFilterApproveDate.get_text())\r\n _compound.append(self.cmbCompound19.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseBy.get_active_text())\r\n _inputs.append(self.cmbFilterCloseBy.get_active_text())\r\n _compound.append(self.cmbCompound20.get_active_text())\r\n\r\n _criteria.append(self.cmbCriteriaCloseDate.get_active_text())\r\n _inputs.append(self.txtFilterCloseDate.get_text())\r\n _compound.append(self.cmbCompound21.get_active_text())\r\n\r\n _inputs.append(self.chkFilterAccepted.get_active())\r\n _compound.append(self.cmbCompound22.get_active_text())\r\n\r\n _inputs.append(self.chkFilterReviewed.get_active())\r\n\r\n _criteria.append(self.cmbCriteriaAssembly.get_active_text())\r\n _model = self.cmbAssembly.get_model()\r\n _row = self.cmbAssembly.get_active_iter()\r\n if _row is not None:\r\n _text = int(_model.get_value(_row, 1))\r\n else:\r\n _text = 0\r\n _inputs.append(_text)\r\n _compound.append(self.cmbCompound23.get_active_text())\r\n\r\n # Build the query from the user-provided inputs.\r\n if all(_c is None for _c in _criteria):\r\n query = None\r\n elif Configuration.RTK_MODULES[0] == 1:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id={0:d} AND \".format(\r\n self._revision_id)\r\n else:\r\n query = \"SELECT * FROM rtk_incident \\\r\n WHERE fld_revision_id=0 AND \"\r\n\r\n if _criteria[0] is not None and _criteria[0] != '':\r\n query = query + \"fld_incident_id\" + _criteria[0] + _inputs[0]\r\n if _compound[0] is not None and 
_compound[0] != '':\r\n query = query + \" \" + _compound[0] + \" \"\r\n\r\n if _criteria[1] is not None and _criteria[1] != '':\r\n query = query + \"fld_incident_category\" + _criteria[1] + \\\r\n str(_inputs[1])\r\n if _compound[1] is not None and _compound[1] != '':\r\n query = query + \" \" + _compound[1] + \" \"\r\n\r\n if _criteria[2] is not None and _criteria[2] != '':\r\n query = query + \"fld_incident_type\" + _criteria[2] + \\\r\n str(_inputs[2])\r\n if _compound[2] is not None and _compound[2] != '':\r\n query = query + \" \" + _compound[2] + \" \"\r\n\r\n if _criteria[3] is not None and _criteria[3] != '':\r\n query = query + \"fld_status\" + _criteria[3] + str(_inputs[3])\r\n if _compound[3] is not None and _compound[3] != '':\r\n query = query + \" \" + _compound[3] + \" \"\r\n\r\n if _criteria[4] is not None and _criteria[4] != '':\r\n query = query + \"fld_criticality\" + _criteria[4] + str(_inputs[4])\r\n if _compound[4] is not None and _compound[4] != '':\r\n query = query + \" \" + _compound[4] + \" \"\r\n\r\n if _criteria[5] is not None and _criteria[5] != '':\r\n query = query + \"fld_incident_age\" + _criteria[5] + str(_inputs[5])\r\n if _compound[5] is not None and _compound[5] != '':\r\n query = query + \" \" + _compound[5] + \" \"\r\n\r\n if _criteria[6] is not None and _criteria[6] != '':\r\n query = query + \"fld_life_cycle\" + _criteria[6] + str(_inputs[6])\r\n if _compound[6] is not None and _compound[6] != '':\r\n query = query + \" \" + _compound[6] + \" \"\r\n\r\n if _criteria[21] is not None and _criteria[21] != '':\r\n query = query + \"fld_hardware_id\" + _criteria[21] + \\\r\n str(_inputs[23])\r\n if _compound[22] is not None and _compound[22] != '':\r\n query = query + \" \" + _compound[22] + \" \"\r\n\r\n if _criteria[7] is not None and _criteria[7] != '':\r\n query = query + \"fld_short_description \" + _criteria[7] + \\\r\n \" '%\" + _inputs[7] + \"%'\"\r\n if _compound[7] is not None and _compound[7] != '':\r\n query = query + \" \" + _compound[7] + \" \"\r\n\r\n if _criteria[8] is not None and _criteria[8] != '':\r\n query = query + \"fld_long_description \" + _criteria[8] + \\\r\n \" '%\" + _inputs[8] + \"%'\"\r\n if _compound[8] is not None and _compound[8] != '':\r\n query = query + \" \" + _compound[8] + \" \"\r\n\r\n if _criteria[9] is not None and _criteria[9] != '':\r\n query = query + \"fld_remarks \" + _criteria[9] + \\\r\n \" '%\" + _inputs[9] + \"%'\"\r\n if _compound[9] is not None and _compound[9] != '':\r\n query = query + \" \" + _compound[9] + \" \"\r\n\r\n if _criteria[10] is not None and _compound[10] != '':\r\n query = query + \"fld_analysis \" + _criteria[10] + \\\r\n \" '%\" + _inputs[10] + \"%'\"\r\n if _compound[10] is not None and _compound[10] != '':\r\n query = query + \" \" + _compound[10] + \" \"\r\n\r\n if _criteria[11] is not None and _compound[11] != '':\r\n query = query + \"fld_test_found \" + _criteria[11] + \\\r\n \" '%\" + _inputs[11] + \"%'\"\r\n if _compound[11] is not None and _compound[11] != '':\r\n query = query + \" \" + _compound[11] + \" \"\r\n\r\n if _criteria[12] is not None and _compound[12] != '':\r\n query = query + \"fld_test_case \" + _criteria[12] + \\\r\n \" '%\" + _inputs[12] + \"%'\"\r\n if _compound[12] is not None and _compound[12] != '':\r\n query = query + \" \" + _compound[12] + \" \"\r\n\r\n if _criteria[13] is not None and _compound[13] != '':\r\n query = query + \"fld_request_by\" + _criteria[13] + \\\r\n \"'\" + _inputs[13] + \"'\"\r\n if _compound[13] is not None and _compound[13] 
!= '':\r\n query = query + \" \" + _compound[13] + \" \"\r\n\r\n if _criteria[14] is not None and _compound[14] != '':\r\n query = query + \"fld_request_date\" + _criteria[14] + \\\r\n str(datetime.strptime(_inputs[14], \"%Y-%m-%d\").toordinal())\r\n if _compound[14] is not None and _compound[14] != '':\r\n query = query + \" \" + _compound[14] + \" \"\r\n\r\n if _criteria[15] is not None and _compound[15] != '':\r\n query = query + \"fld_reviewed_by\" + _criteria[15] + \\\r\n \"'\" + _inputs[15] + \"'\"\r\n if _compound[15] is not None and _compound[15] != '':\r\n query = query + \" \" + _compound[15] + \" \"\r\n\r\n if _criteria[16] is not None and _compound[16] != '':\r\n query = query + \"fld_reviewed_date\" + _criteria[16] + \\\r\n str(datetime.strptime(_inputs[16], \"%Y-%m-%d\").toordinal())\r\n if _compound[16] is not None and _compound[16] != '':\r\n query = query + \" \" + _compound[16] + \" \"\r\n\r\n if _criteria[17] is not None and _compound[17] != '':\r\n query = query + \"fld_approved_by\" + _criteria[17] + \\\r\n \"'\" + _inputs[17] + \"'\"\r\n if _compound[17] is not None and _compound[17] != '':\r\n query = query + \" \" + _compound[17] + \" \"\r\n\r\n if _criteria[18] is not None and _compound[18] != '':\r\n query = query + \"fld_approved_date\" + _criteria[18] + \\\r\n str(datetime.strptime(_inputs[18], \"%Y-%m-%d\").toordinal())\r\n if _compound[18] is not None and _compound[18] != '':\r\n query = query + \" \" + _compound[18] + \" \"\r\n\r\n if _criteria[19] is not None and _compound[19] != '':\r\n query = query + \"fld_complete_by\" + _criteria[19] + \\\r\n \"'\" + _inputs[19] + \"'\"\r\n if _compound[19] is not None and _compound[19] != '':\r\n query = query + \" \" + _compound[19] + \" \"\r\n\r\n if _criteria[20] is not None and _compound[20] != '':\r\n query = query + \"fld_complete_date\" + _criteria[20] + \\\r\n str(datetime.strptime(_inputs[20], \"%Y-%m-%d\").toordinal())\r\n if _compound[20] is not None and _compound[20] != '':\r\n query = query + \" \" + _compound[20] + \" \"\r\n\r\n if _inputs[21]:\r\n query = query + \"fld_accepted=%d\" % 1\r\n if _compound[21] is not None and _compound[21] != '':\r\n query = query + \" \" + _compound[21] + \" \"\r\n\r\n if _inputs[22]:\r\n query = query + \"fld_reviewed=%d\" % 1\r\n\r\n self._modulebook.request_filter_incidents(self._revision_id, query)", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = 
filters[1:, :]", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n filters = [self._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def _add_filters(self, filter_list, filter_path):\n if not isinstance(filter_list, list):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting value of 'filter_list' entry to be a list \"\n \"but instead its a {}.\".format(filter_path, type(filter_list)))\n\n for cur_filter in filter_list:\n self._add_filter(cur_filter, filter_path)", "def load_filters(self, names, interp=True, lamb=None, filterLib=None):\n with self as s:\n filters = [s._load_filter(fname, interp=interp, lamb=lamb)\n for fname in names]\n return(filters)", "def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}", "def get_filter(cls, filter, odata=False):\n\n if filter:\n #www.odata.org/libraries\n if odata:\n lst_filter = []\n if 'and' in filter:\n tmp_filters = filter.split('and')\n else:\n tmp_filters = [filter, ]\n for tmp_filter in tmp_filters:\n if 'eq' in tmp_filter:\n tmp_filter = tmp_filter.replace('eq', '=')\n elif 'gt' in tmp_filter:\n tmp_filter = tmp_filter.raplace('gt', '>')\n elif 'lt' in tmp_filter:\n tmp_filter = tmp_filter.replace('lt', '>')\n lst_filter.append(tmp_filter.split())\n return lst_filter\n else:\n dict_filter = {}\n for lst_attribut in filter.split(','):\n attribut = lst_attribut.split(':')\n if \"/\" in attribut[1]:\n dict_filter[attribut[0]] = attribut[1].split('/')\n else:\n if attribut[1] == 'false':\n dict_filter[attribut[0]] = False\n elif attribut[1] == 'true':\n dict_filter[attribut[0]] = True\n else:\n dict_filter[attribut[0]] = attribut[1]\n return dict_filter\n return False", "def _check_prefilters(\n part_model: \"Part\", prefilters: Union[Dict, List]\n) -> List[PropertyValueFilter]: # noqa: F821\n if isinstance(prefilters, dict):\n property_models: List[Property, str] = prefilters.get(\n MetaWidget.PROPERTY_MODELS, []\n ) # noqa\n values = prefilters.get(MetaWidget.VALUES, [])\n filters_type = prefilters.get(MetaWidget.FILTERS_TYPE, [])\n\n if any(len(lst) != len(property_models) for lst in [values, filters_type]):\n raise IllegalArgumentError(\n 'The lists of \"property_models\", \"values\" and \"filters_type\" should be the '\n \"same length.\"\n )\n prefilters = [\n PropertyValueFilter(\n property_model=pf[0],\n value=pf[1],\n filter_type=pf[2],\n )\n for pf in zip(property_models, values, filters_type)\n ]\n\n warnings.warn(\n \"Prefilters must be provided as list of `PropertyValueFilter` objects. \"\n \"Separate input lists will be deprecated in January 2021.\", # TODO Deprecate January 2021\n PendingDeprecationWarning,\n )\n\n elif not all(isinstance(pf, PropertyValueFilter) for pf in prefilters):\n raise IllegalArgumentError(\n \"`prefilters` must be a list of PropertyValueFilter objects.\"\n )\n\n if part_model:\n [pf.validate(part_model=part_model) for pf in prefilters]\n\n return prefilters", "def filters(self):\n\t\treturn self.local_filter" ]
[ "0.6801903", "0.642236", "0.631748", "0.62891066", "0.59724027", "0.5944512", "0.588867", "0.58861756", "0.5837576", "0.57184476", "0.56906873", "0.5643275", "0.5557998", "0.54352415", "0.54245675", "0.5410087", "0.54082674", "0.5403556", "0.53947294", "0.5388267", "0.53639424", "0.53580344", "0.53506935", "0.5346607", "0.53436136", "0.53420377", "0.53417474", "0.5337802", "0.5336663", "0.5325581" ]
0.65959525
1
Return True if 'klass' is a valid filter class. A valid filter class is a class (of type 'type') that has a '__call__' bound method whose signature matches that of the same method on BaseFilter.
def isValidFilter(klass):
    if not isclass(klass):
        logger.error(f"{klass!r} is not a class")
        return False
    if not callable(klass):
        logger.error(f"{klass!r} is not callable")
        return False
    if getfullargspec(klass.__call__).args != getfullargspec(BaseFilter.__call__).args:
        logger.error(f"{klass!r} '__call__' method has incorrect signature")
        return False
    return True
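A minimal usage sketch, not part of the original source: the BaseFilter import path and the PassthroughFilter class are assumptions for illustration. A subclass that simply inherits '__call__' from BaseFilter passes the signature check by construction, while a bare function fails the class check.

    # Sketch only; assumes isValidFilter and BaseFilter are importable as shown.
    from ufo2ft.filters.base import BaseFilter  # assumed import path

    class PassthroughFilter(BaseFilter):
        # Inherits BaseFilter.__call__ unchanged, so its argspec matches trivially.
        def filter(self, glyph):
            return False

    assert isValidFilter(PassthroughFilter)      # a proper subclass passes
    assert not isValidFilter(lambda font: font)  # a lambda is not a class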
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_filter_class(cls):\n return type(cls) is types.TypeType and issubclass(cls, BaseHostFilter)", "def match(self, cls):\n return isinstance(self, cls)", "def class_is(cls: Class) -> bool:\n pass", "def match(cls, kind: 'dsl.Any') -> bool:\n return isinstance(kind, cls)", "def predicate(cls: nodes.ClassDef) -> bool:\n if cls.name in CLASS_NAME_SKIPLIST:\n # class looks like an API model class, but it isn't.\n return False\n\n if not cls.name.endswith(\"API\") and \"schema\" not in cls.locals:\n # class does not look like an API model class.\n return False\n\n return True", "def filter_func(fieldname):\n if fieldname.startswith('_'):\n return False\n value = getattr(class_, fieldname)\n \n return isinstance(value, type)", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def predicate(obj):\n return inspect.isclass(obj) and issubclass(obj, MafColumnRecord)", "def class_is_type(cls, *seg_type: str) -> bool:\n # Use set intersection\n if cls._class_types.intersection(seg_type):\n return True\n return False", "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n\n return (isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def isValidFeatureWriter(klass):\n if not isclass(klass):\n logger.error(\"%r is not a class\", klass)\n return False\n if not hasattr(klass, \"tableTag\"):\n logger.error(\"%r does not have required 'tableTag' attribute\", klass)\n return False\n if not hasattr(klass, \"write\"):\n logger.error(\"%r does not have a required 'write' method\", klass)\n return False\n if getfullargspec(klass.write).args != getfullargspec(BaseFeatureWriter.write).args:\n logger.error(\"%r 'write' method has incorrect signature\", klass)\n return False\n return True", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def _is_mechanism_spec(spec):\n if inspect.isclass(spec) and issubclass(spec, Mechanism):\n return True\n if isinstance(spec, Mechanism):\n return True\n return False", "def is_Fit_subclass(cls: Type[Fit]) -> bool:\n try:\n if 
issubclass(cls, Fit) and (cls is not Fit):\n return True\n else:\n return False\n except TypeError:\n return False", "def isinstance(self, class_or_string):\n if class_or_string is None:\n return False\n import inspect\n if inspect.isclass(class_or_string):\n return isinstance(self, class_or_string)\n else:\n return self.__class__.__name__.lower() == class_or_string.lower()" ]
[ "0.8063194", "0.66540086", "0.66224563", "0.6453369", "0.63146883", "0.62318975", "0.61231554", "0.610161", "0.6072466", "0.6028027", "0.5993078", "0.5985867", "0.5985867", "0.5985867", "0.5985867", "0.5985867", "0.5985867", "0.59668183", "0.5951617", "0.5935815", "0.5927806", "0.59093046", "0.59093046", "0.58582604", "0.5828157", "0.5800988", "0.5792799", "0.5784961", "0.5762976", "0.57578915" ]
0.85365546
0
Take a string specifying a filter class to load (either a built-in filter or one defined in an external, user-defined module), initialize it with the given options, and return the filter object.
def loadFilterFromString(spec): return _loadPluginFromString(spec, "ufo2ft.filters", isValidFilter)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, classname=None, jobject=None, options=None):\n if jobject is None:\n jobject = Filter.new_instance(classname)\n self.enforce_type(jobject, \"weka.filters.Filter\")\n super(Filter, self).__init__(jobject=jobject, options=options)", "def getFilterClass(filterName, pkg=\"ufo2ft.filters\"):\n # TODO add support for third-party plugin discovery?\n # if filter name is 'Foo Bar', the module should be called 'fooBar'\n filterName = filterName.replace(\" \", \"\")\n moduleName = filterName[0].lower() + filterName[1:]\n module = importlib.import_module(\".\".join([pkg, moduleName]))\n # if filter name is 'Foo Bar', the class should be called 'FooBarFilter'\n className = filterName[0].upper() + filterName[1:]\n if not className.endswith(\"Filter\"):\n className += \"Filter\"\n return getattr(module, className)", "def __init__(self, jobject=None, options=None):\n if jobject is None:\n classname = \"weka.filters.MultiFilter\"\n jobject = MultiFilter.new_instance(classname)\n self.enforce_type(jobject, \"weka.filters.MultiFilter\")\n super(MultiFilter, self).__init__(jobject=jobject, options=options)", "def __init__(self, image, filter_name, cutoff, order = 0):\n self.filter_name = filter_name\n self.image = image\n if filter_name == 'ideal_l':\n self.filter = self.get_ideal_low_pass_filter\n elif filter_name == 'ideal_h':\n self.filter = self.get_ideal_high_pass_filter\n elif filter_name == 'butterworth_l':\n self.filter = self.get_butterworth_low_pass_filter\n elif filter_name == 'butterworth_h':\n self.filter = self.get_butterworth_high_pass_filter\n elif filter_name == 'gaussian_l':\n self.filter = self.get_gaussian_low_pass_filter\n elif filter_name == 'gaussian_h':\n self.filter = self.get_gaussian_high_pass_filter\n\n self.cutoff = cutoff\n self.order = order", "def __init__(self, image, filter_name, cutoff, order = 0):\r\n self.image = image\r\n if filter_name == 'ideal_l':\r\n self.filter = self.get_ideal_low_pass_filter\r\n elif filter_name == 'ideal_h':\r\n self.filter = self.get_ideal_high_pass_filter\r\n elif filter_name == 'butterworth_l':\r\n self.filter = self.get_butterworth_low_pass_filter\r\n elif filter_name == 'butterworth_h':\r\n self.filter = self.get_butterworth_high_pass_filter\r\n elif filter_name == 'gaussian_l':\r\n self.filter = self.get_gaussian_low_pass_filter\r\n elif filter_name == 'gaussian_h':\r\n self.filter = self.get_gaussian_high_pass_filter\r\n\r\n self.cutoff = cutoff\r\n self.order = order\r\n self.filter_name = filter_name", "def __init__(self, \n cutoff_frequency, \n order, \n filter_type=\"maximally_flat\"):\n self.cutoff_freq = cutoff_frequency\n self.order = order\n self.filter_type = filter_type\n \n #TODO: Initialise filter based on maximally flat prototype", "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def make_filter_specification(cls, filter_string):\n try:\n return 
parse_filter(filter_string)\n except ParseException as err:\n raise ValueError('Expression parameters have errors. %s' % err)", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def prepare_advanced_filter(filter_options: str) -> dict:\n import ast\n import json\n\n if filter_options:\n if os.path.isfile(filter_options):\n with open(filter_options, 'r') as f:\n filter_options = json.load(f)\n # advanced filter do not specify collections!\n if 'collections' in filter_options:\n del filter_options['collections']\n else:\n filter_options = ast.literal_eval(filter_options)\n return filter_options\n return None", "def create_filter_from_args(self, args: dict) -> Filter:\n keys = set(args.keys())\n filter_args = {}\n\n if \"name\" in keys:\n value = args.get('name')\n if value != \"\":\n filter_args.update({\"text_filter\": args.get('name')})\n if \"product_in\" in keys:\n value = args.get('product_in')\n if value != \"\":\n filter_args.update({\"product_in\": 'true' if value == \"yes\" else 'false'})\n if \"human_in\" in keys:\n value = args.get('human_in')\n if value != \"\":\n filter_args.update({\"human_in\": 'true' if value == \"yes\" else 'false'})\n if \"institutional\" in keys:\n value = args.get('institutional')\n if value != \"\":\n filter_args.update({\"institutional\": 'true' if value == \"yes\" else 'false'})\n if \"format\" in keys:\n value = args.get('format')\n if value != \"\":\n filter_args.update({\"picture_format\": 'true' if value == \"vertical\" else 'false'})\n if \"credit\" in keys:\n value = args.get('credit')\n if value != \"\":\n filter_args.update({\"author_credits\": value})\n if \"limited_use\" in keys:\n value = args.get('limited_use')\n if value != \"\":\n filter_args.update({\"limited_usage\": 'true' if value == \"yes\" else 'false'})\n if \"tags\" in keys:\n value = args.get('tags')\n if value != \"\":\n filter_args.update({\"limited_usage\": value.split(';')})\n\n f = Filter(**filter_args)\n return f", "def initialize(module_name):\n \n global filter_function\n global debug\n \n # Get the level of debug\n debug = int(rule_manager.get_property(None, module_name, 'debug'))\n\n filter_function = process_filters.initialize_filter(module_name)\n\n return", "def __init__(self, filters, use_include_order):\n self.filters = filters\n self.use_include_order = use_include_order", "def _set_filter_type(filter):\n if filter == 'nat':\n return '-N'\n if filter == 'options':\n return '-O'\n if filter == 'filter':\n return '-R'", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def init(self, *args):\n return _ida_hexrays.udc_filter_t_init(self, *args)", "def ParseOptions(cls, options, configuration_object):\n if not isinstance(configuration_object, tools.CLITool):\n raise errors.BadConfigObject(\n 'Configuration object is not an instance of CLITool')\n\n filter_file = cls._ParseStringOption(options, 'file_filter')\n\n # Search the data location for the filter file.\n if filter_file and not os.path.isfile(filter_file):\n if configuration_object.data_location:\n filter_file_basename = os.path.basename(filter_file)\n filter_file_path = os.path.join(\n configuration_object.data_location, filter_file_basename)\n if os.path.isfile(filter_file_path):\n filter_file = filter_file_path\n\n if filter_file and not os.path.isfile(filter_file):\n raise 
errors.BadConfigOption(\n f'No such collection filter file: {filter_file:s}')\n\n setattr(configuration_object, '_filter_file', filter_file)", "def _ParseFilterOptions(self, options):\n names = [u'date_filters', u'filter_file']\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=names)\n\n extensions_string = self.ParseStringOption(options, u'extensions_string')\n self._ParseExtensionsString(extensions_string)\n\n names_string = getattr(options, u'names_string', None)\n self._ParseNamesString(names_string)\n\n signature_identifiers = getattr(options, u'signature_identifiers', None)\n try:\n self._ParseSignatureIdentifiers(\n self._data_location, signature_identifiers)\n except (IOError, ValueError) as exception:\n raise errors.BadConfigOption(exception)\n\n if self._filter_file:\n self.has_filters = True\n else:\n self.has_filters = self._filter_collection.HasFilters()", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'tolerance'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def __init__(self, type: int, filter: int):\n ...", "def _set_filters(self, options):\n if options.keywords:\n self.filters[\"keywords\"] = string_to_list(options.keywords)\n if options.features:\n self.filters[\"features\"] = string_to_list(options.features)\n if options.authors:\n self.filters[\"authors\"] = string_to_list(options.authors)\n if options.version:\n self.filters[\"version\"] = options.version", "def from_string(cls,\n string_option: str,\n *args,\n **kwargs) -> virus_type:\n _option = ''.join(\n re.findall(r'[0-9a-zA-Z]', string_option)\n ).lower()\n\n for subclass in cls.__subclasses__():\n if subclass.__name__.lower() == _option:\n return subclass(*args, **kwargs)\n\n else:\n raise NotImplementedError(\n f'No corresponding class found for option \"{string_option}\"'\n )", "def __init__(self, init=None, filter_table=None, filter_name=None,\n filter_type=None, **kwargs):\n super(MiriFilter, self).__init__(init=init, **kwargs)\n\n # Data type is filter.\n self.meta.filetype = 'FILTER'\n \n # Define the filter name and type, if given\n if filter_name is not None:\n self.meta.instrument.filter = filter_name\n if filter_type is not None:\n self.meta.instrument.filter_type = filter_type\n\n if filter_table is not None:\n try:\n self.filter_table = filter_table\n except (ValueError, TypeError) as e:\n strg = \"filter_table must be a numpy record array or list of records.\"\n strg += \"\\n %s\" % str(e)\n raise TypeError(strg)\n \n # Define the wavelength units.\n# units = self.get_data_units('filter_table')\n \n # Cached arrays\n self._wavelength = None\n self._transmission = None\n self._interptransmission = None", "def __init__(self) -> None:\r\n self.filters: list[Filter] = []", "def __init__( self, filters=None, prx=None ):\n\n if filters is None:\n if prx is None:\n\n self._filter_list = rts2_wwwapi.rts2comm().get_filters()\n\n elif type(filters) == list:\n self._filter_list = filters\n\n elif type(filters) == dict:\n raise TypeError(\"Filters are should not be a dict, it probably should be None\")\n # this assumes that the keywords of the dictionary are \n # the fitler names and the value is the filter number. \n\n\n #sort by filter number and reverse look up. 
\n # this doesn't work in python3\n #for key, value in sorted(filters.iteritems(), key=lambda (k,v): (v,k)):\n #self._filter_list.append( key )\n\n elif type(filters) == str or type(filters) == unicode:\n self._filter_list = str(filters).split()\n\n else:\n raise TypeError(\"Unexpected filter type {}, type must be string, unicode, list or dict\".format(type(filters)))", "def __init__(self,\n filter_order=DEFAULT_ORDER, cutoff_freq_hz=DEFAULT_CUTOFF_HZ,\n zero_phase=False):\n self.filter_order = filter_order\n self.cutoff_freq_hz = cutoff_freq_hz\n self.zero_phase = zero_phase", "def filter_factory(global_conf, **local_conf):\n conf = global_conf.copy()\n conf.update(local_conf)\n\n def ext_filter(app):\n return UrlRewriteFilter(app, conf)\n return ext_filter", "def init_from_string(self, fs_in, param_string):\n if '(' in param_string:\n name_params_re = re.compile(r'(\\w*)\\((.*)\\)$')\n pieces = name_params_re.match(param_string)\n name = pieces.group(1)\n params = pieces.group(2)\n param_list = params.split(';')\n param_dict = {}\n for param in param_list:\n if '=' not in param:\n raise ValueError('preprocess param %s missing a value.' % param)\n k, v = param.split('=', 1)\n if v.isdigit():\n v = int(v)\n else:\n try:\n v = float(v)\n except ValueError:\n pass\n param_dict[k] = v\n self._name = name\n self.init_highpass(param_dict['highpass_cutoff'],\n param_dict['highpass_order'])\n self.init_channel_numbers(param_dict['channel_numbers'])\n else:\n self.__init__(self, fs_in, param_string)", "def __init__(self_, filter: Union[LogsFilter, UnsetType] = unset, name: Union[str, UnsetType] = unset, **kwargs):\n if filter is not unset:\n kwargs[\"filter\"] = filter\n if name is not unset:\n kwargs[\"name\"] = name\n super().__init__(kwargs)", "def __init__(self, id, filter, opt=defaults):\n self.id = id\n self.filter = filter\n self.host = opt['host']\n self.com_port = opt['com_port']\n self.set_port = opt['set_port']\n self.res_port = opt['res_port']" ]
[ "0.70017433", "0.6447948", "0.6132342", "0.612842", "0.60046303", "0.5954532", "0.59166795", "0.59039634", "0.58298296", "0.5802773", "0.5751299", "0.5747215", "0.5737991", "0.57208437", "0.56715316", "0.56667185", "0.56580377", "0.56450737", "0.56390226", "0.5600757", "0.55901366", "0.5576162", "0.5539542", "0.548949", "0.5488215", "0.54487514", "0.54396504", "0.5402215", "0.53960174", "0.5392279" ]
0.66894424
1
Iterate over lines, yielding each line together with a flag that states whether the line represents a checkbox.
def _iterate_lines(cls, text) -> typing.Generator[str, None, None]:
    for line in text.split('\n'):
        yield line, line.lstrip().startswith(cls._CHECKBOX)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_line(self, line):\n ltype = self.line_type(line)\n if ltype == 'gene':\n self.process_gene_line(line)\n return True\n elif ltype == 'mRNA':\n self.process_mrna_line(line)\n return True\n elif ltype == 'CDS':\n self.process_cds_line(line)\n return True\n elif ltype == 'exon':\n self.process_exon_line(line)\n return True\n elif ltype == 'start_codon' or ltype == 'stop_codon':\n self.process_other_feature_line(line)\n return True\n else:\n self.skipped_features += 1\n return False", "def line_generator(self):\n for V in self.Vrepresentation():\n if V.is_line():\n yield V", "def get_line(lines):\n for line in lines:\n yield line", "def get_line(lines):\n for line in lines:\n yield line", "def assert_true_isinstance(logical_line):\n if assert_true_isinstance_re.match(logical_line):\n yield (0, \"M316: assertTrue(isinstance(a, b)) sentences not allowed\")", "def truth(self, line, cell = None, bools = (\"False\", \"True\")):\n if line:\n print(truth(*line.split(\", \"), bools = bools))\n elif cell:\n print(truth(*cell.strip().split(\"\\n\"), bools = bools))", "def checklist_line(line, day, checklist):\n\n output_lines = f'<b>Checklist items due before class on {day}</b>'\n output_lines += f'<ul class=\"checklist\" id=\"check-list-{day}\">'\n for item in line[2:].split(' | '):\n item = item.strip()\n item_without_markdown_link = re.sub(r\"\\[(.+)\\]\\(.+\\)\", r\"\\1\", item)\n if day in checklist and item_without_markdown_link in checklist[day]:\n output_lines += f'<li class=\"unchecked checked\">{item}</li>'\n else:\n output_lines += f'<li class=\"unchecked\">{item}</li>'\n if item_without_markdown_link == 'Practice (no points) worksheet demo.':\n print(output_lines)\n output_lines += '</ul>'\n return output_lines", "def assert_true_instance(logical_line):\n if asse_trueinst_re.match(logical_line):\n yield (0, \"G316: assertTrue(isinstance(a, b)) sentences not allowed\")", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def wizard_input_verification(form):\n # Note,that key must start with line_..., other options are ignored, to allow DNF option to work\n lines = [value for key, value in form.items() if key.startswith(\"line\")]\n # if the entered lines are unique, they will have the same value\n if len(lines) != len(set(lines)):\n return False\n else:\n return True", "def test_checkboxtextgroup(self):\r\n self.check_group('checkboxtextgroup', 'choice', 'checkbox')", "def decode_line_y_true(line):\n items = line.split()\n path = items[0]\n items = items[1:]\n\n bboxes = []\n labels = []\n for item in items:\n if not item:\n continue\n x1, y1, x2, y2, label = item.split(',')\n x1, y1, x2, y2, label = float(x1), float(y1), float(x2), float(y2), float(label)\n bboxes.append([x1, y1, x2, y2])\n labels.append(label)\n\n return path, bboxes, labels", "def iter_logical_lines(cls, blob):\r\n indent_stack = []\r\n contents = []\r\n line_number_start = None\r\n\r\n def translate_logical_line(start, end, contents, endmarker=False):\r\n while contents[0] == '\\n':\r\n start += 1\r\n contents.pop(0)\r\n while contents[-1] == '\\n':\r\n end -= 1\r\n 
contents.pop()\r\n indent = len(indent_stack[-1]) if indent_stack else 0\r\n if endmarker:\r\n indent = len(contents[0])\r\n return (start, end + 1, indent)\r\n\r\n for token in cls.iter_tokens(blob):\r\n token_type, token_text, token_start = token[0:3]\r\n if token_type == tokenize.INDENT:\r\n indent_stack.append(token_text)\r\n if token_type == tokenize.DEDENT:\r\n indent_stack.pop()\r\n if token_type in cls.SKIP_TOKENS:\r\n continue\r\n contents.append(token_text)\r\n if line_number_start is None:\r\n line_number_start = token_start[0]\r\n elif token_type in (tokenize.NEWLINE, tokenize.ENDMARKER):\r\n yield translate_logical_line(\r\n line_number_start,\r\n token_start[0] + (1 if token_type is tokenize.NEWLINE else -1),\r\n list(filter(None, contents)),\r\n endmarker=token_type == tokenize.ENDMARKER)\r\n contents = []\r\n line_number_start = None", "def _line_wrapper( self, diffs ):\n\n\t\t# pull from/to data and flags from mdiff iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\t# check for context separators and pass them through\n\t\t\tif flag is None:\n\t\t\t\tyield fromdata, todata, flag\n\t\t\t\tcontinue\n\t\t\t( fromline, fromtext ), ( toline, totext ) = fromdata, todata\n\t\t\t# for each from/to line split it at the wrap column to form\n\t\t\t# list of text lines.\n\t\t\tfromlist, tolist = [], []\n\t\t\tself._split_line( fromlist, fromline, fromtext )\n\t\t\tself._split_line( tolist, toline, totext )\n\t\t\t# yield from/to line in pairs inserting blank lines as\n\t\t\t# necessary when one side has more wrapped lines\n\t\t\twhile fromlist or tolist:\n\t\t\t\tif fromlist:\n\t\t\t\t\tfromdata = fromlist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\tfromdata = ( '', ' ' )\n\t\t\t\tif tolist:\n\t\t\t\t\ttodata = tolist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\ttodata = ( '', ' ' )\n\t\t\t\tyield fromdata, todata, flag", "def test_LogicalLines(self) -> None:\n content = \"\"\"\nfoo \\\\\nbar \\\\\nbaz\nfoo\nbling \\\\\nbling \\\\ bling\nbling\n\"\"\"\n fobj = io.StringIO(content)\n lines = LogicalLines(fobj).readlines()\n assert lines == [\n '\\n',\n 'foo bar baz\\n',\n 'foo\\n',\n 'bling bling \\\\ bling\\n',\n 'bling\\n',\n ], lines", "def checkLine(line: str):\n\n key_words = ['src', 'href', 'url']\n out = list()\n for word in key_words:\n if line.__contains__(word):\n out.append((True, word))\n\n # Check if output list is not empty\n if len(out) == 0:\n # If list is empty return None\n return None\n else:\n return out", "def _header_transformer(self, lines):\n needle = b'--%s\\n' % self.boundary\n in_header = False\n for line in lines:\n if line == needle:\n in_header = True\n if in_header:\n assert line[-1] == b'\\n'\n line = line[:-1] + b'\\r\\n'\n if line == b'\\r\\n':\n in_header = False\n yield line", "def logicalLines(iterable, **kwargs):\n # kwargs\n kwargs = lowerKeys(kwargs)\n continueChar = kwargs.get('continuechar', '-')\n commentChar = kwargs.get('commentchar', '!')\n #\n iterable = ( line.strip() for line in iterable )\n tmp = []\n for line in iterable:\n if line.split(commentChar)[0].endswith(continueChar):\n tmp.append(line[:-1])\n else:\n if tmp:\n tmp.append(line)\n yield ' '.join(tmp)\n tmp = []\n else:\n yield line\n # flush\n if tmp:\n yield ' '.join(tmp)", "def test_widget_is_checkbox():\n form = ExampleForm()\n field = form[\"checkbox\"]\n assert is_checkbox(field) is True", "def _create_examples(self, lines, set_type):\n examples = []\n for i, line in lines.iterrows():\n guid = \"%s-%s\" % (set_type, i)\n text_a = line['sentence1']\n text_b = line['sentence2']\n label 
= line['label']\n examples.append(\n InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def _create_checkboxes(self) -> widgets.VBox:\n checkboxes = []\n pgons_checkboxes = []\n graph_checkboxes = []\n\n graphs = [\n (name, \"graphs\", layer_subtype, graph)\n for name, graph in self.viewer.layer_dict[\"graphs\"].items()\n for layer_subtype in [\"graph\", \"pgons\"]\n ]\n maps = [\n (name, \"maps\", \"map\", map_layer[\"map\"])\n for name, map_layer in self.viewer.layer_dict[\"maps\"].items()\n ]\n\n # Add checkboxes for all maps and graphs (including habitats)\n for idx, (layer_name, layer_type, layer_subtype, layer_dict) in enumerate(\n maps + graphs\n ):\n\n layout = widgets.Layout(padding=\"0px 0px 0px 0px\")\n\n # Indent habitat checkboxes\n if layer_type == \"graphs\":\n if layer_dict[\"is_habitat\"]:\n layout = widgets.Layout(padding=\"0px 0px 0px 25px\")\n\n checkbox = widgets.Checkbox(\n value=True,\n description=\"{} ({})\".format(layer_name, layer_subtype),\n disabled=False,\n indent=False,\n layout=layout,\n )\n checkbox.add_traits(\n layer_type=traitlets.Unicode().tag(sync=True),\n layer_subtype=traitlets.Unicode().tag(sync=True),\n layer_name=traitlets.Unicode().tag(sync=True),\n )\n checkbox.layer_type = layer_type\n checkbox.layer_name = layer_name\n checkbox.layer_subtype = layer_subtype\n\n checkbox.observe(self._switch_layer_visibility)\n\n if idx == 0:\n checkboxes.append(widgets.HTML(\"<b>Map Data</b>\"))\n\n checkboxes.append(checkbox)\n\n if layer_subtype == \"graph\":\n graph_checkboxes.append(checkbox)\n elif layer_subtype == \"pgons\":\n pgons_checkboxes.append(checkbox)\n\n # Add habitats header if last part of main graph\n if (\n layer_type == \"graphs\"\n and layer_subtype == \"pgons\"\n and not layer_dict[\"is_habitat\"]\n ):\n checkboxes.append(\n widgets.HTML(\n \"<b>Habitats in {}</b>\".format(layer_name),\n layout=widgets.Layout(padding=\"0px 0px 0px 25px\"),\n )\n )\n\n # Add horizontal rule if last map to separate from graphs\n if idx == len(maps) - 1:\n checkboxes.append(widgets.HTML(\"<hr/>\"))\n checkboxes.append(widgets.HTML(\"<b>Graph Data</b>\"))\n\n # Create button to toggle all polygons at once\n hide_pgon_button = widgets.ToggleButton(description=\"Toggle all polygons\")\n\n def toggle_all_pgons(change):\n try:\n if change[\"name\"] == \"value\":\n for box in pgons_checkboxes:\n box.value = change[\"new\"]\n except: # pylint: disable=bare-except\n self.logger.exception(\"Exception in view button callback on click.\")\n\n hide_pgon_button.observe(toggle_all_pgons)\n\n # Create button to toggle all graphs at once\n hide_graph_button = widgets.ToggleButton(description=\"Toggle all graphs\")\n\n def toggle_all_graphs(change):\n try:\n if change[\"name\"] == \"value\":\n for box in graph_checkboxes:\n box.value = change[\"new\"]\n except: # pylint: disable=bare-except\n self.logger.exception(\"Exception in view button callback on click.\")\n\n hide_graph_button.observe(toggle_all_graphs)\n\n checkboxes.append(widgets.HTML(\"<hr/>\"))\n buttons = widgets.HBox([hide_pgon_button, hide_graph_button])\n checkboxes.append(buttons)\n\n return widgets.VBox(checkboxes)", "def is_line(self): \n return False", "def filter_lines(lines, filter_regex, groups=None):\n pattern = re.compile(filter_regex)\n for line in lines:\n match = pattern.search(line)\n if match:\n if groups is None:\n yield line\n elif len(groups) == 1:\n yield match.group(groups[0])\n else:\n matched_groups = match.groupdict()\n yield 
tuple(matched_groups.get(group) for group in groups)", "def exercise_lines(path):\n with open(path) as fin:\n within_exercise = False\n for line, line_number in zip(fin, count(1)):\n line = line.lstrip()\n\n if within_exercise and line.startswith('#'):\n yield line_number\n elif not within_exercise and line.startswith('#') and 'EXERCISE:' in line:\n within_exercise = True\n yield line_number\n else:\n within_exercise = False", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n examples.append(\n InputExample(\n guid=f\"{set_type}-{data['idx']}\",\n text_a=data[\"passage\"],\n text_b=data[\"question\"],\n label=str(data[\"label\"]),\n )\n )\n return examples", "def getLineInformation(line):\n \n pass", "def FilterLine(self, a_line):\n return a_line", "def _create_examples(self, lines, set_type):\n examples = []\n for (i, ids) in enumerate(lines):\n text_a = lines[ids]['sentence']\n examples.append(\n InputExample(text_a=text_a) )\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n passage = data[\"passage\"][\"text\"]\n for Q in data[\"passage\"][\"questions\"]:\n question = Q[\"question\"]\n for A in Q[\"answers\"]:\n guid = f\"{set_type}-{data['idx']-Q['idx']-A['idx']}\"\n examples.append(\n InputExample(\n guid=guid,\n text_a=passage,\n text_b=question + \" \" + A[\"text\"],\n label=str(A[\"label\"]),\n )\n )\n return examples", "def _create_examples(self, lines, set_type):\n examples = []\n for (_, data) in enumerate(lines):\n examples.append(\n InputExample(\n guid=f\"{set_type}-{data['idx']}\",\n text_a=data[\"premise\"],\n text_b=data[\"hypothesis\"],\n label=str(data[\"label\"]),\n )\n )\n return examples" ]
[ "0.5809057", "0.5628909", "0.5528122", "0.5528122", "0.5431721", "0.54257345", "0.53042746", "0.5127729", "0.5053834", "0.5010728", "0.5007379", "0.49887162", "0.4927147", "0.48863062", "0.4869411", "0.48680195", "0.48293066", "0.4813869", "0.48024556", "0.47902128", "0.47862524", "0.47705993", "0.4757334", "0.47566575", "0.4747567", "0.47455814", "0.4739552", "0.47332525", "0.47291088", "0.47266454" ]
0.68260306
0
Get title of a checkbox item.
def _get_checkbox_title(cls, line: str) -> str:
    return line.strip()[len('- [ ] '):]
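For illustration only (the sample line and its indentation are made up), the slicing drops the six-character '- [ ] ' prefix once surrounding whitespace is stripped:

    # Sketch: whitespace is removed first, then the '- [ ] ' marker is cut off.
    line = "  - [ ] buy milk"
    assert line.strip()[len('- [ ] '):] == "buy milk"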
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTitle(self, item):\n return item.Title() or item.getId()", "def get_title(self, list_item):\n title = list_item.find('a', {'class': 'biz-name'}).find('span')\n return title.get_text()", "def selected_title(self):\r\n return self.title", "def getMITItemTitle(self,xc,item,id):\n \n titles = xc.xpathEval(\"mitcp:title\")\n title = ''\n if titles:\n title = titles[0].getContent()\n else:\n title = id\n\n return title", "def get_title(self):\n return self._get_title_()", "def get_title(cls, obj, **kwargs):\n if isinstance(obj.data, dict):\n titles = filter(None, get_value(obj.data, \"titles.title\", []))\n if titles:\n # Show first title that evaluates to True\n return titles[0]\n return \"No title available\"", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def GetXTitle(self):\n return self.GetXaxis().GetTitle()", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def selected_title(self):\r\n try:\r\n return menu_selected[self.name]\r\n except KeyError:\r\n return NavButton.selected_title(self)", "def title(self):\n return self.get(self._names[\"title\"])", "def title(self):\n return self.get(self._names[\"title\"])", "def get_title(self):\n return self._title", "def get_title(self):\n return self._title", "def get_title(self):\n return self._title", "def get_checked_labels(self):\r\n checked_labels = []\r\n item_count = self.count()\r\n if item_count < 1:\r\n return checked_labels\r\n\r\n for item_index in xrange(item_count):\r\n item = self.item(item_index)\r\n if item is None or item.checkState() == Qt.Unchecked:\r\n continue\r\n checked_labels.append(str(item.text()))\r\n return checked_labels", "def get_title(self):\n return SettingsBase.get_setting(self, 'title')", "def get_title(self):\n\n return self.title", "def get_title():", "def get_title(self):\n return self.run_command('get_title')[0]", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")", "def title(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"title\")" ]
[ "0.6929655", "0.63157094", "0.62988156", "0.622051", "0.6198238", "0.6106785", "0.61048466", "0.60735077", "0.5996513", "0.5996513", "0.5996513", "0.5994052", "0.59846", "0.59846", "0.59831744", "0.59831744", "0.59831744", "0.59449315", "0.5930652", "0.59213066", "0.59195167", "0.5882178", "0.5878255", "0.5878255", "0.5878255", "0.5878255", "0.5878255", "0.5878255", "0.5878255", "0.5878255" ]
0.69560874
0
Set a checkbox tick in text based on checkbox title. >>> Checkbox.set(' Foo\\n [ ] bar', 'bar') returns ' Foo\\n [x] bar'
def set(cls, text: str, title: str, graceful: bool = True) -> str:
    result, found, modified = cls._do_checkbox_setting(text, title, ('[ ]', '[x]', 1))

    if not found:
        raise UserInputError("Checkbox with title {!r} was not found in the provided text".format(title))

    if not graceful and not modified:
        raise UserInputError("Checkbox with title {!r} was already set".format(title))

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uiCheckboxSetText(checkbox, text):\n\n clibui.uiCheckboxSetText(checkbox, bytes(text, 'utf-8'))", "def _get_checkbox_title(cls, line: str) -> str:\n return line.strip()[len('- [ ] '):]", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def update_special_string(value):\n global labelspecial\n now = datetime.now()\n labelspecial.document.text = check_special(now)", "def uiCheckboxText(checkbox):\n\n clibui.uiCheckboxText.restype = ctypes.c_char_p\n text = clibui.uiCheckboxText(checkbox)\n\n return text.decode()", "def unset(cls, text: str, title: str, graceful: bool = True) -> str:\n result, found, modified = cls._do_checkbox_setting(text, title, ('[x]', '[ ]', 1))\n\n if not found:\n raise UserInputError(\"Checkbox with title {!r} was not found in the provided text\".format(title))\n\n if not modified:\n # Try again with uppercase, we could optimize this to iterate only once.\n # This can resolve in a different behaviour on multiple runs with upper and lower case checkboxes,\n # but that is fine for now.\n result, found, modified = cls._do_checkbox_setting(text, title, ('[X]', '[ ]', 1))\n\n if not graceful and not modified:\n raise UserInputError(\"Checkbox with title {!r} was already unset\".format(title))\n\n return result", "def uiCheckboxSetChecked(checkbox, checked):\n\n clibui.uiCheckboxSetChecked(checkbox, checked)", "def styled_set_label_text(label, text):\n front = \"<b><span foreground='#AAAAAA' size='large'>\"\n end = \"</span></b>\"\n label.set_markup(front+text+end)", "def setCheckBoxLabel( self, cCtrlName, cLabel ):\n oControl = self.getControl( cCtrlName )\n oControl.setLabel( cLabel )", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def htmlCheckbox(labelText, parName, args, labelAttr='', attr=''):\n snippet = htmlLabel(labelText,parName,labelAttr)\n checked = 'checked=\"checked\"' if parName in args else ''\n snippet += '<input type=\"checkbox\" name=\"%s\"%s%s/>\\n' % (parName,sep(checked),sep(attr))\n return snippet", "def xaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.xbox.set_text(r\"$%s$\" % (label))\r\n pass", "def set_title(self, setto):\n command = 'title ' + str(setto)\n self.run_command(command)", "def set_text(self, T):\n self.text = T", "def update_action_button_text(self, checkbox, is_checked):\n checkbox_id = self._checkbox_group.id(checkbox)\n if is_checked:\n text = self._action_string[checkbox_id]\n self._checkbox_group.buttonToggled.disconnect(\n self.update_action_button_text)\n self._restart_current.setChecked(False)\n self._restart_all.setChecked(False)\n checkbox.setChecked(True)\n self._checkbox_group.buttonToggled.connect(\n self.update_action_button_text)\n else:\n text = self._action_string[self.NO_RESTART]\n self._action_button.setText(text)", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def setValue(self, value):\n if isinstance(value, (list, tuple)):\n value = ' '.join([u'\\\"%s\\\"' % str(item) for item in value])\n 
self.getGtkObject('property_entry').set_text(value)", "def retranslate(self):\n\t\t#NOTE: mnemonics are not recognized for this checkbox. no idea why\n\t\tself.button.set_label('')\n\t\tself.button.child.set_text_with_mnemonic(_('_...'))\n\t\tself.button.set_tooltip_text(_('Select a directory'))\n\t\tself.edit.set_tooltip_text(_('Directory location'))", "def set_text_f(self, format, *args):\n self._text.set(format % args)\n self.change_bg(\"green\")\n self._label.update_idletasks()", "def set_text(self, value: str) -> None:\n logging.info(f\"set text. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.style.border=\"2px solid red\";\n elm.value = \"{value}\";\"\"\"\n self._execute_javascript(js)", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def title(self, value: str):\n self.tk_ref.title(value)", "def setValue(self,val):\n val = str(val)\n if self._plain:\n self.input.setText(val)\n else:\n updateText(self.input,val)", "def SetLabel(self, s):\r\n\r\n self.label = s", "def set_text(self, text):\n self.set_text_f(\"%s\", text)", "def set_label(self, key: str, value: str):\n self.labels[key] = value" ]
[ "0.66640157", "0.63094", "0.61071444", "0.55329484", "0.55253977", "0.5489177", "0.53705555", "0.53607", "0.5356963", "0.5292355", "0.5292355", "0.5292355", "0.5292355", "0.5292355", "0.52801657", "0.5204396", "0.51950055", "0.5193192", "0.50891393", "0.5080792", "0.5060383", "0.49905995", "0.498609", "0.49857914", "0.49510336", "0.49510002", "0.4926922", "0.49070835", "0.488466", "0.48845991" ]
0.7073593
0
Unset a checkbox tick in text based on checkbox title. >>> Checkbox.unset(' Foo\\n [x] bar', 'bar') returns ' Foo\\n [ ] bar'
def unset(cls, text: str, title: str, graceful: bool = True) -> str:
    result, found, modified = cls._do_checkbox_setting(text, title, ('[x]', '[ ]', 1))

    if not found:
        raise UserInputError("Checkbox with title {!r} was not found in the provided text".format(title))

    if not modified:
        # Try again with uppercase, we could optimize this to iterate only once.
        # This can resolve in a different behaviour on multiple runs with upper and lower case checkboxes,
        # but that is fine for now.
        result, found, modified = cls._do_checkbox_setting(text, title, ('[X]', '[ ]', 1))

    if not graceful and not modified:
        raise UserInputError("Checkbox with title {!r} was already unset".format(title))

    return result
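Taken together with 'set' above, a short usage sketch; the Checkbox class name, the sample text, and the expected outputs are assumptions based on the '- [ ] ' convention shown in the code rather than quoted from the source.

    # Sketch only: expected results assume the class's checkbox marker matches both
    # unticked '- [ ]' and ticked '- [x]' lines.
    text = "Tasks:\n- [ ] write docs\n- [ ] add tests"

    ticked = Checkbox.set(text, "write docs")
    # expected: "Tasks:\n- [x] write docs\n- [ ] add tests"

    restored = Checkbox.unset(ticked, "write docs")
    # expected: the original text again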
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_checkbox_title(cls, line: str) -> str:\n return line.strip()[len('- [ ] '):]", "def set(cls, text: str, title: str, graceful: bool = True) -> str:\n result, found, modified = cls._do_checkbox_setting(text, title, ('[ ]', '[x]', 1))\n\n if not found:\n raise UserInputError(\"Checkbox with title {!r} was not found in the provided text\".format(title))\n\n if not graceful and not modified:\n raise UserInputError(\"Checkbox with title {!r} was already set\".format(title))\n\n return result", "def clearEntry(*args, **kwargs):\n\targs[0].set_text('')", "def user_labels_erase(*args):\n return _ida_hexrays.user_labels_erase(*args)", "def unset_label(self):\n self.set_label(self.label.prev_str, self.label.prev_color)", "def uiCheckboxSetText(checkbox, text):\n\n clibui.uiCheckboxSetText(checkbox, bytes(text, 'utf-8'))", "def unselect_checkbox(self, locator):\n self._info(\"Unselecting checkbox '%s'.\" % locator)\n locator = self._parse_locator(locator)\n if self._selenium.is_checked(locator):\n self._selenium.uncheck(locator)", "def delete_checkbox(self):\n wanna_delete = mb.askyesno(\n \"Warning\",\n f'Delete checkbox for column \"{self.text_value}\"?',\n icon=\"warning\"\n )\n if wanna_delete:\n self.handle_modified()\n conf[\"cols_all\"].remove(self.text_value)\n try:\n conf[\"cols_selected\"].remove(self.text_value)\n except ValueError:\n pass\n mb.showinfo(\"Info\", \"This checkbox will not bother you anymore\")\n self.box_frame.grid_remove()\n self.parent.focus_set()\n else:\n self.parent.focus_set()", "def uiCheckboxText(checkbox):\n\n clibui.uiCheckboxText.restype = ctypes.c_char_p\n text = clibui.uiCheckboxText(checkbox)\n\n return text.decode()", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def clear_trash(text):\n for i in TRASH:\n text = text.replace(i, '')\n\n return text", "def remove_label(self, ):\n if self.AttributeNames.LABEL in self.attrs:\n del self.attrs[self.AttributeNames.LABEL]\n return self", "def unsetLabel(self):\n return _libsbml.GeneProduct_unsetLabel(self)", "def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n self.category.remove(category)", "def removeTextGlyph(self, *args):\n return _libsbml.Layout_removeTextGlyph(self, *args)", "def remove_label(self, key: str):\n del self.labels[key]", "def uncheck(self, element_tuple, *, wrapper_element_tuple=None):\n self.log_info(f\"Browser.uncheck: Setting {element_tuple} checkbox to unchecked\")\n checkbox = self.CORE.find_element(*self.format_element(element_tuple))\n if checkbox.is_selected():\n if wrapper_element_tuple is not None:\n self.log_info(f\"Browser.check: Wrapper element was provided, clicking {wrapper_element_tuple} instead\")\n self.click(wrapper_element_tuple)\n else:\n self.click(element_tuple)\n else:\n self.log_info(f\"Browser.check: Skipping action as {element_tuple} is already unchecked\")\n return", "def remove_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--remove-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = 
p.communicate()\n print(err)", "def clear(self, line=0):\n tb = self.textbox.text\n tb.configure(state=Tix.NORMAL)\n if line == 0:\n tb.delete(\"0.0\", Tix.END)\n elif line > 0:\n tb.delete(\"%d.0\" % (line + 1), Tix.END)\n else: # line < 0\n lines = tb.get(\"0.0\", Tix.END).splitlines()\n count = len(lines)\n tb.delete(\"0.0\", \"%d.0\" % (count + -(-line)))\n tb.configure(state=Tix.DISABLED)", "def clear_all(entry):\n text = entry.clear_all()", "def clear_text(self):\n # use the .children attribute to access all widgets that are \"in\" another widget\n self.root.ids.Title.text = \"\"\n self.root.ids.Artist.text = \"\" #Empty the text boxes\n self.root.ids.Year.text = \"\"\n for instance in self.root.ids.entriesBox.children: #Normalise the button state\n instance.state = 'normal'\n self.root.ids.statusLabel2.text=\"\" #Empty the status label text box", "async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)", "def test_clear_selected_text(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.4\", \"4.4\"),\n after_sel=(\"2.4\", \"2.4\"),\n command_name=\"clear-selected-text\",\n )", "def unset(bot, update, chat_data):\n if 'job' not in chat_data:\n update.message.reply_text('Sem notificacoes ativadas')\n return\n\n job = chat_data['job']\n job.schedule_removal()\n del chat_data['job']\n check = emojize(\":white_check_mark:\", use_aliases=True)\n update.message.reply_text('Notificacao cancelada com sucesso'+check+'')", "def remove(self, label):\n\n\t\t\tself[label].remove()", "def _remove_title_from_name(titles: tuple, text: str) -> str:\n for title in titles:\n if f'{title}.' in text:\n return text.replace(f'{title}.', empty_string).replace(' ', space).strip()\n elif title in text:\n return text.replace(title, empty_string).replace(' ', space).strip()\n return text", "def delete_chr(text):\n \"\"\" if the user try to delete an empty line it will not allowed him:)\"\"\"\n if len(text.getText())<1:\n text.setText(\"\")\n return text\n else:\n text.setText(text.getText()[:-10]) # 10 is the length of the word \"Backspace\" + 1 letter i delete\n return text", "def removeFactor(self, string: str, string2: str) -> _AbstractKnobBuilder__T:\n ...", "def test_issue_remove_label(self):\n pass", "def onClearButton(self):\n markupsNode = slicer.util.getNode( \"MarkupsFiducial\" ) \n markupsNode.RemoveAllMarkups()" ]
[ "0.58806765", "0.5465693", "0.5399874", "0.53318834", "0.53229344", "0.51720303", "0.51474494", "0.5123869", "0.50766873", "0.50161433", "0.5007751", "0.49832973", "0.49005058", "0.4870477", "0.4860511", "0.48524624", "0.48411223", "0.48141718", "0.4802714", "0.47882122", "0.4784837", "0.47783983", "0.47638935", "0.47458324", "0.4721411", "0.4719888", "0.47184473", "0.47145408", "0.47042128", "0.46866548" ]
0.75263244
0
Serialize preprocessor and model.
def serialize_pipeline(preprocessor, clf):
    print("Serializing preprocessor and model.")
    with open(DATA_DIR + "/preprocessor.dill", "wb") as prep_f:
        dill.dump(preprocessor, prep_f)
    with open(DATA_DIR + "/model.dill", "wb") as model_f:
        dill.dump(clf, model_f)
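A loading counterpart is sketched below under the assumption that DATA_DIR points at the same directory and that dill is available as in the function above; it is illustrative, not quoted from the source.

    # Sketch: read back the artifacts written by serialize_pipeline.
    import dill

    with open(DATA_DIR + "/preprocessor.dill", "rb") as prep_f:
        preprocessor = dill.load(prep_f)
    with open(DATA_DIR + "/model.dill", "rb") as model_f:
        clf = dill.load(model_f)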
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump_model(self):", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def serialize(self):", "def serialize(self):\n pass", "def dump(self, model_name: str) -> None:\n # Dump each preprocessor\n for index, preprocessor in enumerate(self._preprocessors):\n model_filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(\n model_name, index)\n joblib.dump(preprocessor, model_filename)\n\n # Dump the scalar\n filename = Files.MODEL_PREPROCESSOR_MODEL_FMT.format(\n model_name, Packages.Models.Training.SCALAR_MODEL_NAME)\n joblib.dump(self._last_scalar_model, filename)\n\n # Dump the dimensionality reduction model\n reduction_model_path = Files.MODEL_REDUCTION_MODEL_FMT.format(\n model_name)\n joblib.dump(self._reduction_model, reduction_model_path)", "def serialize(self):\n keys = [\n 'uid',\n 'commit_sha',\n 'timestamp',\n 'filename',\n 'comment',\n 'train_data',\n 'val_data',\n 'test_data',\n 'model_files',\n 'custom_data',\n ]\n data = {key: self.__dict__[key] for key in keys}\n with open(os.path.join(self.root_path, self._data_file), 'w') as file:\n json.dump(data, file)", "def serialize(model, optimizer, epoch):\n package = {'state_dict': model.state_dict(),\n 'optim_dict': optimizer.state_dict(),\n 'epoch': epoch}\n return package", "def _serialise(self):\n # TODO (M Foley)\n pass", "def write(self, model):\n\n # Initialize json_dump\n json_dump = {\"model\": [], \"metadata\": {}}\n\n # Set timestamp in metadata\n json_dump[\"metadata\"][\"time\"] = str(datetime.now())\n\n # Set the size of the model in metadata\n json_dump[\"metadata\"][\"model_size\"] = len(model.models)\n\n for obj in model.models:\n _class = type(obj).__name__\n if _class in [\n Winding,\n PhaseWinding,\n Wire,\n PhaseCapacitor,\n Position,\n PhaseLoad,\n ]:\n continue\n json_dump[\"model\"].append({})\n json_dump[\"model\"][-1][\"class\"] = _class\n\n try:\n json_dump[\"model\"][-1][\"name\"] = {\"class\": \"str\", \"value\": obj.name}\n except:\n json_dump[\"model\"][-1][\"name\"] = {\"class\": \"str\", \"value\": None}\n pass\n\n for key, value in obj._trait_values.items():\n if key in [\"capacitance_matrix\", \"impedance_matrix\", \"reactances\"]:\n json_dump[\"model\"][-1][key] = {\"class\": \"list\", \"value\": []}\n for v in value:\n if isinstance(v, complex):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"complex\", \"value\": [v.real, v.imag]}\n )\n elif isinstance(v, list):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"list\", \"value\": []}\n )\n for vv in v:\n if isinstance(vv, complex):\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"value\"\n ].append(\n {\n \"class\": \"complex\",\n \"value\": [vv.real, vv.imag],\n }\n )\n else:\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"value\"\n ].append(\n {\n \"class\": str(type(vv)).split(\"'\")[1],\n \"value\": vv,\n }\n )\n else:\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": str(type(v)).split(\"'\")[1], \"value\": v}\n )\n continue\n if isinstance(value, list):\n json_dump[\"model\"][-1][key] = {\"class\": \"list\", \"value\": []}\n for v in value:\n\n if isinstance(v, complex):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"complex\", \"value\": [v.real, v.imag]}\n )\n\n elif isinstance(v, Position):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Position\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": 
str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, Unicode):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Unicode\", \"value\": v.default_value}\n )\n\n elif isinstance(v, Wire):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Wire\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, PhaseCapacitor):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"PhaseCapacitor\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, Winding):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Winding\"}\n )\n for kkk, vvv in v._trait_values.items():\n if kkk != \"phase_windings\":\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ] = {\"class\": \"list\", \"value\": []}\n for phw in v.phase_windings:\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ][\"value\"].append({\"class\": \"PhaseWinding\"})\n for kkkk, vvvv in phw._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ][\"value\"][-1][kkkk] = {\n \"class\": str(type(vvvv)).split(\"'\")[1],\n \"value\": vvvv,\n }\n\n elif isinstance(v, PhaseLoad):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"PhaseLoad\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n continue\n\n if isinstance(value, complex):\n json_dump[\"model\"][-1][key] = {\n \"class\": \"complex\",\n \"value\": [value.real, value.imag],\n }\n continue\n\n json_dump[\"model\"][-1][key] = {\n \"class\": str(type(value)).split(\"'\")[1],\n \"value\": value,\n }\n\n with open(os.path.join(self.output_path, self.filename), \"w\") as f:\n f.write(\n json_tricks.dumps(json_dump, allow_nan=True, sort_keys=True, indent=4)\n )", "def __serialize__(self):\n return {\"_custom_type\" : self.__class__.__name__,\n \"name\" : self.name,\n \"src\" : self.src,\n \"exec_loc\" : self.exec_loc,\n \"precompiled\" : self.precompiled}", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def dump(self, model_path):\n pickle.dump(self.scaler, gzip.open(os.path.join(model_path, 'scaler.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)\n# pickle.dump(self.mapper, gzip.open(os.path.join(model_path, 'mapper.pkl.gz'),'w'),\n# protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(self.batcher, gzip.open(os.path.join(model_path, 'batcher.pkl.gz'), 'w'),\n protocol=pickle.HIGHEST_PROTOCOL)", "def _export_model_representations(self, config):\n\n self.logger.msg1(\"Preparing model representations\")\n modelsets = get_modelsets(self.dbpath, self.obo, config.partition_size)\n prefix = self.rootpath + \"-models-\"\n for i, refset in enumerate(modelsets):\n progress = str(i+1) + \"/\" + str(len(modelsets))\n self.logger.msg1(\"Saving model representations: \"+progress)\n refset.save(prefix + str(i+1), \"phenotype\", what=(\"data\",))", "def serialize(self) -> Dict[str, Any]:\n serialized_outputs = {\"mappings\": []}\n\n # Get the right output dictionary.\n if len(self._manual_outputs) > 
0:\n serialized_outputs[\"type\"] = \"manual\"\n d = self._manual_outputs\n else:\n serialized_outputs[\"type\"] = \"default\"\n d = self._default_outputs\n\n # Iterate through \"bindings\" (GraphOutputs).\n for key, binding in d.items():\n # Serialize: step.module.port -> output | ntype.\n smp = binding.producer_step_module_port\n source = str(smp.step_number) + \".\" + smp.module_name + \".\" + smp.port_name\n # Get type.\n ntype_str = str(binding.ntype)\n # Serialize!\n serialized_outputs[\"mappings\"].append(source + \"->\" + key + \" | \" + ntype_str)\n # Return the result.\n return serialized_outputs", "def serialize(self, obj):\n pass", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def serialize(self, data):", "def dumps(self):\n pass", "def serialize(mode):\r\n serialize_version(mode)\r\n vcb.serialize(mode) \r\n for x in xfrms:\r\n x.serialize(mode)", "def serialize_data(self, app) -> dict:", "def save_model(self) -> bytes:\n\n return serialize_for_zippy(self.model)", "def serialize(self):\n\t\treturn { 'type': self.type, 'parameters' : self.parameters}", "def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path", "def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))", "def serialize(self, data):\n raise NotImplementedError", "def serialize(self, to_file=None):\n raise NotImplementedError", "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. 
Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def serialize(self) -> str:\n pass", "def save(self):\n\t\tif os.path.exists(\"%s.obj\" % (self.name)):\n\t\t\tos.system(\"rm -f %s.obj\" % (self.name))\n\t\telse: pass\n\t\tfile = open(\"%s.obj\" % (self.name), \"wb\")\n\t\tif callable(self.obj):\n\t\t\tif \"dill\" in sys.modules:\n\t\t\t\ttry:\n\t\t\t\t\tpickle.dump(self.obj, file)\n\t\t\t\texcept:\n\t\t\t\t\twarnings.warn(\"\"\"\\\nCould not pickle function. The following attribute will not be saved with \\\nthis output: %s\"\"\" % (self.name), UserWarning)\n\t\t\t\t\tpickle.dump(None, file)\n\t\t\telse:\n\t\t\t\twarnings.warn(\"\"\"\\\nEncoding functions along with VICE outputs requires the package dill \\\n(installable via pip). The following attribute will not be saved with this \\\noutput: %s\"\"\" % (self.name), UserWarning)\n\t\t\t\ttry:\n\t\t\t\t\tpickle.dump(self._default, file)\n\t\t\t\texcept:\n\t\t\t\t\tpickle.dump(None, file)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tpickle.dump(self.obj, file)\n\t\t\texcept:\n\t\t\t\twarnings.warn(\"\"\"Could not save object %s with this VICE \\\noutput.\"\"\" % (self.name), UserWarning)\n\t\t\t\tpickle.dump(None, file)\n\t\tfile.close()" ]
[ "0.6649002", "0.6420852", "0.63892835", "0.6387775", "0.63862485", "0.6346818", "0.621803", "0.6161769", "0.61575824", "0.60979456", "0.6096011", "0.5986972", "0.5961545", "0.5935674", "0.5916678", "0.5909123", "0.5811863", "0.5769323", "0.57688487", "0.5745424", "0.5719557", "0.56899434", "0.56815845", "0.5638298", "0.56323534", "0.56221193", "0.56216824", "0.5620049", "0.5618184", "0.5605494" ]
0.7590971
0
() > () Attempt to train a neural network to predict the satisfaction probability of a continuously defined environment.
def run(): cons_in, soln_in, disc = make_discriminator() target, loss, accuracy, optimiser = make_training_nodes(disc) training_set_sampler = make_sampler(cons_in, soln_in, target) test_set_sampler = make_sampler(cons_in, soln_in, target) disc.get_session().run(tf.global_variables_initializer()) fit( disc.get_session(), optimiser, training_set_sampler, 250, 2000, 32, [("Loss", loss), ("Accuracy", accuracy)], ) print( "Validation accuracy: {}".format( disc.feed(accuracy, test_set_sampler.batch(1024)) ) ) plot_surface( evaluate_surface( lambda x, y: Circles.solve([0, 0, 0.25], [x, y, 0.25]), (-1, 1, 0.08), (-1, 1, 0.08), ), x_label="Solution x", y_label="Solution y", z_label="p(satisfied | x, y)", ) plot_surface( evaluate_surface( lambda x, y: disc.feed( disc.output_node, {cons_in: [[0, 0, 0.25]], soln_in: [[x, y, 0.25]]} )[0], (-1, 1, 0.08), (-1, 1, 0.08), ), x_label="Solution x", y_label="Solution y", z_label="p(satisfied | x, y)", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainNet():", "def test_n_and_train(self):\r\n\r\n n = NeuronNetwork(1,\r\n [1],\r\n [[[0.0,0.0]]],\r\n [[0.0]])\r\n\r\n inputs = [[0,0], [0,1], [1,0], [1,1]]\r\n targets = [[0], [0], [0], [1]]\r\n\r\n n.train(inputs,targets,1000,180)\r\n\r\n print(n)\r\n self.assertLess(n.feed_forward([0,0]), [0.001])\r\n self.assertGreater(n.feed_forward([1,0]), [0.001])\r\n self.assertGreater(n.feed_forward([0,1]), [0.001])\r\n self.assertGreater(n.feed_forward([1,1]), [0.9])", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train():\n pass", "def train(self, game_life):\n rewards = [obs.get('reward') for obs in game_life]\n cum_rewards = sum(rewards)\n\n # manage the graphics\n self.reward_graph.append(cum_rewards)\n plt.plot(self.reward_graph)\n x, y, z = peri_bounding_box\n\n # The amound of nudge\n if cum_rewards:\n displacement = cum_rewards * self.displacement\n else:\n displacement = 0 - self.displacement\n\n # Store observations and perturbed predictions\n data, targets = [], []\n\n for obs in game_life:\n\n # Perturb action\n action, prediction = obs.get('action')\n if self.epsilon and (random.uniform(0, 1.0) < self.epsilon):\n action = random.randrange(18)\n\n # Copy\n update = list(prediction)\n\n # Update only the target action\n update[0][action] = update[0][action] + displacement\n\n\n data.append(\n # Apply bounding box before appending\n np.array(obs.get('observation')[x[0]:x[1], y[0]:y[1], :])\n )\n\n\n update = np.array(update).reshape(1,18),\n targets.append(update)\n\n if data and len(data) > 15:\n # Image processing\n datagen = preprocessing.image.ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n rotation_range=20,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n datagen.fit(data)\n\n # Training data generator\n train = datagen.flow(np.array(data), np.squeeze(np.array(targets)),\n batch_size=16)\n\n # Finally train\n self.graph.fit_generator(train, steps_per_epoch=len(data)/16,\n epochs=30, verbose=0,\n callbacks=[\n callbacks.EarlyStopping(monitor='acc'),\n # callbacks.ModelCheckpoint() ?\n ]\n )", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", 
loss_weights)", "def train(features, targets, weights, bias):\n # see gradient_descent for explanation\n epochs = 100\n learning_rate = 0.1\n\n picture_nb = 2\n\n # Print current accuracy. How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n for epoch in range(epochs):\n if epoch % 10 == 0:\n # get normalized scores\n predictions = activation(pre_activation(features, weights, bias))\n # compare with targets to see how bad our algorithm is\n print(\"Cost = %s\" % cost(predictions, targets))\n # Replot graph. Check in create_dataset for explanation of parameters\n if picture_nb == 2:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='red')\n elif picture_nb == 11:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='green')\n else:\n plt.plot(features[:, 0], (weights[0] * features[:, 0] + bias) / -weights[1], color='orange')\n picture_nb+=1\n\n # Initialize gradients\n # weights_gradients is 2D array with 2 values\n weights_gradients = np.zeros(weights.shape)\n bias_gradient = 0\n # Go through each row\n for feature, target in zip(features, targets):\n # Compute prediction\n z = pre_activation(feature, weights, bias)\n # Get normalized score\n y = activation(z)\n # Update gradients based on formulas established before. Look at gradient_descent to understand what we\n # are doing. Also, the formulas are below, just before the call of the function train.\n weights_gradients += (y - target) * derivative_activation(z) * feature\n # no multiplication of feature because it does not depend on some coordinates.\n bias_gradient += (y - target) * derivative_activation(z)\n\n # Update variables. These are the lines that result the cost to get reduced.\n weights = weights - learning_rate * weights_gradients\n bias = bias - learning_rate * bias_gradient\n\n # Print final accuracy. 
How many people have been classified as sick/healthy correctly?\n predictions = predict(features, weights, bias)\n print(\"Accuracy: \", np.mean(predictions == targets))\n\n plt.scatter(features[:, 0], features[:, 1], s=40, c=targets, cmap=plt.cm.Spectral)\n plt.savefig(\"DataPointsLineEvolution.png\")\n # legend for understanding\n plt.legend(['Original division', 'New division', 'New division', 'New division', 'New division', 'New division',\n 'New division', 'New division', 'New division', 'Final division'], loc='upper left')\n # save picture of data points drawn.\n plt.savefig(\"DataPointsLineEvolutionLegend.png\")", "def and_setup(epochs):\n learning_rate = 0.15\n value_inputs = [\n # A B\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1]\n ]\n values_simple_outputs = [1 if a + b ==\n 2 else 0 for a, b in value_inputs]\n values_network_outputs = [[a] for a in values_simple_outputs]\n perceptron = Perceptron([0.5, 0.5, 0.5], 'c', ['a', 'b'])\n network = PerceptronNetwork(\n [\n PerceptronLayer(\n [\n Perceptron([0.5, 0.5, 0.5], 'c', ['a', 'b'])\n ], 'only_layer')\n ])\n\n perceptron_estimated_values = []\n network_estimated_values = []\n perceptron_unit_error = []\n network_unit_error = []\n for _ in range(0, epochs):\n for value, result in zip(value_inputs, values_simple_outputs):\n # Step 1: forward pass - predict\n estimated_value = perceptron.forward(value)\n perceptron_estimated_values.append(estimated_value)\n\n # Step 2: back pass - collect errors\n weighted_error = result - estimated_value\n unit_error = perceptron.backward(\n estimated_value, weighted_error)\n perceptron_unit_error.append(unit_error)\n\n # Step 3: update weights\n perceptron = perceptron.update_weights(\n value, unit_error, learning_rate)\n\n for values, results in zip(value_inputs, values_network_outputs):\n # Step 1: forward pass - predict\n estimated_results, layer_states = network.forward(values)\n network_estimated_values.append(estimated_results[0])\n\n # Step 2: back pass - collect errors\n unit_errors = network.backward(layer_states, results)\n network_unit_error.append(unit_errors[0][0])\n\n # Step 3: update weights\n network = network.update_weights(\n layer_states, unit_errors, learning_rate)\n\n return (perceptron,\n network,\n perceptron_estimated_values,\n network_estimated_values,\n perceptron_unit_error,\n network_unit_error)", "def eval(self):\n self.train(mode=False)", "def pcntrain(self,eta,nIterations,verbose=False): \n # Add the inputs that match the bias node\n inputs = np.concatenate((self.inputs,-np.ones((self.nData,1))),axis=1)\n targets = self.targets\n # Training\n change = range(self.nData)\n\n for n in range(nIterations):\n \n self.outputs = self._pcnfwd(inputs)\n self.weights += eta*np.dot(np.transpose(inputs),self.fd(self.outputs)*(targets-self.outputs))\n if verbose:\n print \"Iteration: \", n\n\t\tprint self.weights\n\t\t\t\n\t\tactivations = self._pcnfwd(inputs)\n\t\tprint \"Final outputs are:\"\n\t\tprint activations\n \n # Randomise order of inputs\n np.random.shuffle(change)\n inputs = inputs[change,:]\n targets = targets[change,:]", "def set_up_predictor(method, n_unit, conv_layers, class_num):\n\n mlp = MLP(out_dim=class_num, hidden_dim=n_unit)\n\n if method == 'nfp':\n print('Training an NFP predictor...')\n nfp = NFP(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers)\n return GraphConvPredictor(nfp, mlp)\n elif method == 'ggnn':\n print('Training a GGNN predictor...')\n ggnn = GGNN(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers)\n return GraphConvPredictor(ggnn, 
mlp)\n elif method == 'schnet':\n print('Training an SchNet predictor...')\n schnet = SchNet(out_dim=class_num, hidden_dim=n_unit,\n n_layers=conv_layers)\n return GraphConvPredictor(schnet, None)\n elif method == 'weavenet':\n print('Training a WeaveNet predictor...')\n n_atom = 20\n n_sub_layer = 1\n weave_channels = [50] * conv_layers\n\n weavenet = WeaveNet(weave_channels=weave_channels, hidden_dim=n_unit,\n n_sub_layer=n_sub_layer, n_atom=n_atom)\n return GraphConvPredictor(weavenet, mlp)\n elif method == 'rsgcn':\n print('Training an RSGCN predictor...')\n rsgcn = RSGCN(out_dim=n_unit, hidden_dim=n_unit, n_layers=conv_layers)\n return GraphConvPredictor(rsgcn, mlp)\n elif method == 'relgcn':\n print('Training an RelGCN predictor...')\n num_edge_type = 4\n relgcn = RelGCN(out_channels=n_unit, num_edge_type=num_edge_type,\n scale_adj=True)\n return GraphConvPredictor(relgcn, mlp)\n elif method == 'relgat':\n print('Train Relational GAT model...')\n relgat = RelGAT(out_dim=n_unit, hidden_dim=n_unit,\n n_layers=conv_layers)\n return GraphConvPredictor(relgat, mlp)\n raise ValueError('[ERROR] Invalid method: {}'.format(method))", "def FNN(ANNSetup, test, train):\n\n #ClassWeights = GetClassWeights(train.OutTrue,train.Weights)\n TrainWeights = GetTrainWeights(train.OutTrue,train.Weights) # Transformation of the Monte Carlo weights for training\n\n #tf.debugging.set_log_device_placement(True) #Check if system is running on the correct device\n\n #Create the model and pass it the data for Callbacks\n model = Sequential()\n model.X_train = train.Events\n model.Y_train = train.OutTrue\n model.W_train = train.Weights #Original weights!\n model.X_test = test.Events\n model.Y_test = test.OutTrue\n model.W_test = test.Weights\n\n model = BuildModel(ANNSetup,model) # Building the model from the predefinite configuration (FNN.py)\n\n Opti = GetOpti(ANNSetup.Optimizer,ANNSetup.LearnRate.Lr) # Set the optimizer\n lrate = GetLearnRate(ANNSetup.LearnRate,ANNSetup.Epochs) # Set a learning rate schedule\n Roc = Histories() # Definie history for the AUC results at each training step \n #Roc = RedHistory()\n if(lrate == None):\n Lcallbacks = [Roc]\n #Lcallbacks = []\n else:\n Lcallbacks = [Roc,lrate]\n #Lcallbacks = [lrate]\n\n model.summary()\n model.compile(optimizer=Opti, loss='binary_crossentropy', metrics=['accuracy'])\n start = time.clock() # start clock to track training time\n history = model.fit(train.Events,train.OutTrue, sample_weight=TrainWeights, validation_data=(test.Events, test.OutTrue, test.Weights), epochs=int(ANNSetup.Epochs),\n batch_size=int(ANNSetup.Batch), verbose=2, callbacks=Lcallbacks) #, callbacks=Lcallbacks , sample_weight=TrainWeights\n #history = model.fit(train.Events,train.OutTrue,batch_size=4000,epochs=2)\n end = time.clock()\n print(\"The training took {} seconds\".format(end-start))\n\n LAuc = Roc.TestAucs\n LTrainAuc = Roc.TrainAucs\n print(\"Best Test Auc {0:.4f} at Epoch {1}\".format(max(LAuc),(LAuc.index(max(LAuc))+1))) #0:.4f\n print(\"Best Train Auc {0:.4f}\".format(LTrainAuc[LAuc.index(max(LAuc))]))\n\n for i in range(len(LAuc)):\n print(\"Auc at Epoch {0}: {1:.4f} Ov: {2:.3f}\".format(i,LAuc[i],1-LAuc[i]/LTrainAuc[i]))\n\n model.save(ANNSetup.SavePath)\n\n return model, Roc", "def train(self, inputs, desired):\n inputs.append(1) # bias input\n guess = self.feedforward(inputs)\n error = desired - guess\n for i in range(len(self.weights)):\n self.weights[i] = self.weights[i] + \\\n self.learning_rate * error * inputs[i]", "def train(self, eta):\n\n def sigmoid(t):\n 
return 1.0 / (1.0 + math.exp(-t))\n\n learning_rate = eta\n\n perceptron_trained = False\n\n correct_instances = 0\n while (perceptron_trained == False):\n correctly_classified = 0\n for mail in self.TRAINER.MAILS:\n\n mail.output = sigmoid(np.dot(self.WEIGHTS, mail.feature_vector))\n\n for idx, weight in enumerate(self.WEIGHTS):\n self.WEIGHTS[idx] = self.WEIGHTS[idx] + \\\n learning_rate * (mail.target - mail.output) * mail.feature_vector[idx]\n\n if abs(mail.target - mail.output) <= 0.05:\n mail.correct_class = True\n correctly_classified += 1\n\n print correctly_classified\n wrongly_classified = len(self.TRAINER.MAILS) - correctly_classified\n\n if learning_rate > 0.03:\n learning_rate -= 0.01\n print learning_rate\n\n if (wrongly_classified < 5):\n perceptron_trained = True\n correct_instances = correctly_classified\n\n # we know that the output is close enough to target\n # so we can set the output = target, to have discrete values\n for mail in self.TRAINER.MAILS:\n mail.output = mail.target\n\n with open('results/weights.txt', 'w+') as f:\n for weight in self.WEIGHTS:\n f.write(str(weight)+'\\n')\n\n print (100.0 * correct_instances) / 4000.0", "def evaluate(self):\n self.training = False", "def inference_spa(flow_lik,\n flow_post,\n prior,\n simulator,\n optimizer_lik,\n optimizer_post,\n decay_rate_post,\n x_o,\n x_o_batch_post,\n dim_post,\n prob_prior,\n nbr_lik,\n nbr_epochs_lik,\n nbr_post,\n nbr_epochs_post,\n batch_size,\n batch_size_post,\n epochs_hot_start=10,\n validation_fraction=0.1,\n early_stopping=True,\n stop_after_epochs=20):\n\n nbr_iter = len(prob_prior)\n\n print(\"start full training\")\n\n models_lik = []\n models_post = []\n\n scheduler_post = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer_post, gamma=decay_rate_post)\n\n for i in range(nbr_iter):\n\n # decay post lr\n if i >= 1 and decay_rate_post > 0:\n scheduler_post.step()\n\n # print iter info\n print(\"Iteration: \" + str(i + 1))\n print(\"optimizer_post_lr: \" + str(scheduler_post.get_last_lr()))\n print(\"prob_prior: \" + str(prob_prior[i]))\n\n # update likelihood model\n\n nbr_lik_prior = int(prob_prior[i] * nbr_lik[i])\n nbr_like_post = int((1 - prob_prior[i]) * nbr_lik[i])\n\n theta_prior = prior.sample(sample_shape=(nbr_lik_prior,))\n\n if nbr_like_post == 0: # this is to avoid concatunate a tensor with grad to the theta tensor\n theta_full = theta_prior\n else:\n theta_post = flow_post.sample(nbr_like_post, context=x_o) # .reshape(1,dim)\n theta_post = theta_post.reshape((nbr_like_post, dim_post))\n # not sure if this is valid.... 
Is ok since we sample from a mixture\n theta_prior_check = prior.log_prob(theta_post)\n\n # print(theta_prior_check.shape)\n idx_save = (~torch.isinf(theta_prior_check)).nonzero()\n\n # print(idx_save.shape)\n\n if idx_save.shape[0] > 0:\n theta_post = theta_post[idx_save.reshape(-1), :]\n theta_full = torch.cat([theta_prior, theta_post.detach()], dim=0)\n else:\n theta_full = theta_prior\n\n # remove thetas that are outside of prior\n\n x_full = simulator(theta_full)\n\n _train_like(x_full, theta_full, nbr_epochs_lik[i], batch_size, flow_lik, optimizer_lik,\n validation_fraction, early_stopping, stop_after_epochs)\n\n # update posterior model\n\n # 2' step: train posterior model from prior predictive first, only used to get a hot start\n if i == 0:\n _train_post_prior_pred(x_full, theta_full, epochs_hot_start, batch_size, flow_post, optimizer_post,\n validation_fraction)\n # models_post.append(copy.deepcopy(flow_post))\n\n # Sample training data from posterior\n\n _train_post_sim_fly(nbr_post[i], nbr_epochs_post[i], batch_size_post, flow_post, flow_lik, optimizer_post,\n prior, x_o_batch_post, dim_post, x_o, validation_fraction, early_stopping,\n stop_after_epochs)\n\n # save trained model for each iter\n models_lik.append(copy.deepcopy(flow_lik))\n models_post.append(copy.deepcopy(flow_post))\n\n return models_lik, models_post", "def learn(self):\n total_error = 0\n threshold = 0.05\n\n counter = len(self._training_set)*len(self._perceptrons)\n total_error+=self.learning_step()\n\n while total_error/counter > threshold:\n counter += len(self._training_set)*len(self._perceptrons)\n total_error +=self.learning_step()", "def train_X(self):\n raise Exception(\"You cannot train a base predictor.\")", "def __init__(self, input_dim: int, hidden_layer: bool) -> None:\n\n # --- PLEASE READ --\n # Use the parameters below to train your feed-forward neural network.\n\n # Number of hidden units if hidden_layer = True.\n self.hidden_units = 25\n\n # This parameter is called the step size, also known as the learning rate (lr).\n # See 18.6.1 in AIMA 3rd edition (page 719).\n # This is the value of α on Line 25 in Figure 18.24.\n self.lr = 1e-3\n\n # Line 6 in Figure 18.24 says \"repeat\".\n # This is the number of times we are going to repeat. This is often known as epochs.\n self.epochs = 400\n\n # We are going to store the data here.\n # Since you are only asked to implement training for the feed-forward neural network,\n # only self.x_train and self.y_train need to be used. You will need to use them to implement train().\n # The self.x_test and self.y_test is used by the unit tests. 
Do not change anything in it.\n self.x_train, self.y_train = None, None\n self.x_test, self.y_test = None, None\n\n np.random.seed(0) # Setting random seed for reproducibility.\n\n self.weights, self.biases = None, None # Initializing weights and biases\n\n self.total_layers = (\n None # Initializing the number of layers in the neural network.\n )\n\n \"\"\"\n I have implemented the neural network as two lists, one with the weight matrices between each layer,\n and the other with the bias vectors.\n \"\"\"\n if hidden_layer:\n self.weights = [\n np.random.randn(self.hidden_units, input_dim),\n np.random.randn(1, self.hidden_units),\n ]\n self.biases = [np.random.randn(self.hidden_units, 1), np.random.randn(1, 1)]\n self.total_layers = 3\n else:\n self.weights = [np.random.randn(1, input_dim)]\n self.biases = [np.random.randn(1, 1)]\n self.total_layers = 2\n\n self.sigmoid = lambda x: 1.0 / (\n 1.0 + np.exp(-x)\n ) # The sigmoid activation function: 1 / (1 + e^(-x))\n\n self.sigmoid_derivative = lambda x: self.sigmoid(x) * (\n 1 - self.sigmoid(x)\n ) # The derivative of the sigmoid activation function to be used in the backpropagation algorithm.", "def fit(self):\n accuracy = 0\n no_improvement = 0\n epochs = trange(self.args.epochs, desc=\"Accuracy\")\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)\n self.model.train()\n for epoch in epochs:\n self.optimizer.zero_grad()\n prediction = self.model(self.propagation_matrix, self.features)\n loss = torch.nn.functional.nll_loss(prediction[self.train_nodes], self.target[self.train_nodes])\n loss = loss + self.args.lambd*torch.sum(self.model.page_rank_convolution_1.weight_matrix**2)\n loss.backward()\n self.optimizer.step()\n new_accuracy = self.score(self.validation_nodes)\n epochs.set_description(\"Validation Accuracy: %g\" % round(new_accuracy,4))\n if new_accuracy < accuracy:\n no_improvement = no_improvement + 1\n if no_improvement == self.args.early_stopping:\n epochs.close()\n break\n else:\n no_improvement = 0\n accuracy = new_accuracy \n acc = self.score(self.test_nodes)\n print(\"\\nTest accuracy: \" + str(round(acc,4)) )", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def train(network_def, target_params, optimizer, states, actions, next_states, rewards,\n terminals, loss_weights, target_opt, num_tau_samples, num_tau_prime_samples,\n num_quantile_samples, cumulative_gamma, double_dqn, kappa, tau,alpha,clip_value_min, num_actions,rng):\n online_params = optimizer.target\n def loss_fn(params, rng_input, target_quantile_vals, loss_multipliers):\n def online(state):\n return network_def.apply(params, state, num_quantiles=num_tau_samples, rng=rng_input)\n\n model_output = jax.vmap(online)(states)\n quantile_values = model_output.quantile_values\n quantiles = model_output.quantiles\n chosen_action_quantile_values = jax.vmap(lambda x, y: x[:, y][:, None])(\n quantile_values, actions)\n # Shape of bellman_erors and huber_loss:\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n bellman_errors = (target_quantile_vals[:, :, None, :] -\n chosen_action_quantile_values[:, None, :, :])\n # The huber loss (see Section 2.3 of the paper) is defined via two cases:\n # case_one: |bellman_errors| <= kappa\n # case_two: |bellman_errors| > kappa\n huber_loss_case_one = (\n (jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *\n 0.5 * bellman_errors ** 2)\n huber_loss_case_two = (\n (jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *\n kappa * 
(jnp.abs(bellman_errors) - 0.5 * kappa))\n huber_loss = huber_loss_case_one + huber_loss_case_two\n # Tile by num_tau_prime_samples along a new dimension. Shape is now\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n # These quantiles will be used for computation of the quantile huber loss\n # below (see section 2.3 of the paper).\n quantiles = jnp.tile(quantiles[:, None, :, :],\n [1, num_tau_prime_samples, 1, 1]).astype(jnp.float32)\n # Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.\n quantile_huber_loss = (jnp.abs(quantiles - jax.lax.stop_gradient(\n (bellman_errors < 0).astype(jnp.float32))) * huber_loss) / kappa\n # Sum over current quantile value (num_tau_samples) dimension,\n # average over target quantile value (num_tau_prime_samples) dimension.\n # Shape: batch_size x num_tau_prime_samples x 1.\n loss = jnp.sum(quantile_huber_loss, axis=2)\n loss = jnp.squeeze(jnp.mean(loss, axis=1), axis=-1)\n\n mean_loss = jnp.mean(loss_multipliers * loss)\n\n return mean_loss, loss\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n\n if target_opt == 0:\n rng, target_quantile_vals = target_quantile_values_fun(\n network_def,\n online_params,\n target_params,\n next_states,\n rewards,\n terminals,\n num_tau_prime_samples,\n num_quantile_samples,\n cumulative_gamma,\n double_dqn,\n rng)\n\n elif target_opt == 1:\n rng, target_quantile_vals = munchau_target_quantile_values_fun(\n network_def,\n online_params,\n target_params,\n states,\n actions,\n next_states,\n rewards,\n terminals,\n num_tau_prime_samples,\n num_quantile_samples,\n cumulative_gamma,\n double_dqn,\n rng,\n tau,\n alpha,\n clip_value_min,\n num_actions\n )\n\n else:\n print('error')\n\n rng, rng_input = jax.random.split(rng)\n (mean_loss, loss), grad = grad_fn(online_params, rng_input, target_quantile_vals, loss_weights)\n optimizer = optimizer.apply_gradient(grad)\n return rng, optimizer, loss, mean_loss", "def make_neural_net_challenging():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n wt7 = random_weight()\n wt8 = random_weight()\n wt9 = random_weight()\n wt10 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n wAD = Weight('wAD', wt7)\n wBD = Weight('wBD', wt8)\n wD = Weight('wD', -1)\n wCE = Weight('wCE', wt9)\n wDE = Weight('wDE', wt10)\n wE = Weight('wE', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n D = Neuron('D', [A,B,i0], [wAD,wBD,wD])\n E = Neuron('D', [C,D,i0], [wCE,wDE,wE])\n P = PerformanceElem(E, 0.0)\n\n net = Network(P,[A, B, C, D, E])\n return net", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder 
to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def train_naive(): # add arguments as needed\n pass", "def train(self):\n\t\traise NotImplementedError", "def shallownn_predict(self, x: np.array) -> np.array:\r\n if self.snnModel is None:\r\n print(\"neural network not trained, please run shallownn_fit first!\")\r\n return None\r\n else:\r\n return np.argmax(self.snnModel.predict(x), axis=1)", "def __init__(self, learning_rate=0.01, *args, **kwargs):\n #\n #\n # Placeholder variables to feed data into\n self.inputs = tf.placeholder(tf.float32, [None, 784], name=\"Inputs\")\n self.correct_labels = tf.placeholder(tf.float32, [None, 10], name=\"CorrectLabels\")\n\n # Gives a single number instead of the one-hot representation we\n # expect as input\n self._correct_labels_as_numbers = tf.argmax(self.correct_labels, axis=1, name=\"CorrectLabelsAsNumbers\")\n\n #\n #\n # Create network architecture\n self._nodes_per_layer = [784, 100, 100, 10] # Currently not configurable\n self._biases = self._create_bias_shaped_variables(self._nodes_per_layer, stddev=0.1)\n self._weights = self._create_weight_shaped_variables(self._nodes_per_layer, stddev=0.1)\n\n self._raw_outputs = self._create_network_architecture(\n inputs=self.inputs,\n biases=self._biases,\n weights=self._weights\n )\n self._var_list = self._biases + self._weights\n\n # \"Soft\" classification outputs are the softmax probabilities\n # for each input to be from a particular class, e.g. for a number\n # six we could see something like this in the output:\n # 0 1 2 3 4 5 6 7 8 9\n # [0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.7, 0.0, 0.1, 0.0]\n self._soft_classification_outputs = tf.nn.softmax(self._raw_outputs, name=\"SoftClassificationOutputs\")\n # \"Hard\" classification outputs are just a single number for\n # each input, representing the class the network thinks the number\n # most likely belongs to (e.g. 
\"6\").\n self._classification_outputs = tf.argmax(self._raw_outputs, axis=1, name=\"ClassificationOutputs\")\n\n #\n #\n # Initialize evaluation\n _correct_prediction = tf.equal(\n self._classification_outputs,\n self._correct_labels_as_numbers\n )\n # Ratio of correct classifications out of all classifications\n # (currently the only metric this class offers).\n self._accuracy = tf.reduce_mean(tf.cast(_correct_prediction, tf.float32), name=\"Accuracy\")\n\n #\n #\n # Initialize learning\n self._optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n\n self._cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=self.correct_labels, logits=self._raw_outputs)\n )\n self._train_step = self._optimizer.minimize(self._cross_entropy)", "def train(network_def, target_params, optimizer, states, actions, next_states, rewards,\n terminals, loss_weights, cumulative_gamma, target_opt, mse_inf,tau,alpha,clip_value_min, rng):\n online_params = optimizer.target\n def loss_fn(params, rng_input, target, loss_multipliers):\n def q_online(state):\n return network_def.apply(params, state, rng=rng_input)\n\n q_values = jax.vmap(q_online)(states).q_values\n q_values = jnp.squeeze(q_values)\n replay_chosen_q = jax.vmap(lambda x, y: x[y])(q_values, actions)\n \n if mse_inf:\n loss = jax.vmap(mse_loss)(target, replay_chosen_q)\n else:\n loss = jax.vmap(dqn_agent.huber_loss)(target, replay_chosen_q)\n\n mean_loss = jnp.mean(loss_multipliers * loss)\n return mean_loss, loss\n\n rng, rng2, rng3, rng4 = jax.random.split(rng, 4)\n\n def q_target(state):\n return network_def.apply(target_params, state, rng=rng2)\n\n def q_target_online(state):\n return network_def.apply(online_params, state, rng=rng4)\n\n if target_opt == 0:\n target = dqn_agent.target_q(q_target, next_states, rewards, terminals, cumulative_gamma) \n elif target_opt == 1:\n #Double DQN\n target = target_DDQN(q_target_online, q_target, next_states, rewards, terminals, cumulative_gamma)\n\n elif target_opt == 2:\n #Munchausen\n target = target_m_dqn(q_target_online, q_target, states,next_states,actions,rewards,terminals,\n cumulative_gamma,tau,alpha,clip_value_min)\n else:\n print('error')\n\n grad_fn = jax.value_and_grad(loss_fn, has_aux=True)\n (mean_loss, loss), grad = grad_fn(online_params, rng3, target, loss_weights)\n optimizer = optimizer.apply_gradient(grad)\n return optimizer, loss, mean_loss", "def train(x_train, y_train, x_valid, y_valid, config):\n train_acc = []\n valid_acc = []\n train_loss = []\n valid_loss = []\n best_model = None\n NUM_EPOCH = config['epochs']\n EARLY_STOP = config['early_stop']\n EARLY_STOP_EPOCH = config['early_stop_epoch']\n BATCH_SIZE = config['batch_size']\n model = NeuralNetwork(config=config)\n loss = float('inf')\n best_loss = float('inf')\n best_accuracy = 0\n patience = 0\n\n\n\n for i in range (NUM_EPOCH):\n\n x_train, y_train = shuffle(x_train, y_train)\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n\n for j in range (0, len(x_train), BATCH_SIZE):\n start = j\n end = j + BATCH_SIZE\n if (end > len(x_train)):\n end = len(x_train)\n\n x = x_train[start:end]\n y = y_train[start:end]\n\n model.forward(x, y) \n model.backward()\n\n train_epoch_loss = model.forward(x_train, y_train)\n \n train_predict = np.zeros_like(model.y)\n train_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n train_accuracy = sum([1 if all(train_predict[i] == y_train[i]) else 0 for i in range(len(y_train))])/len(y_train)\n\n train_loss.append(train_epoch_loss)\n 
train_acc.append(train_accuracy)\n \n valid_epoch_loss = model.forward(x_valid, y_valid)\n valid_predict = np.zeros_like(model.y)\n valid_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n valid_accuracy = sum([1 if all(valid_predict[i] == y_valid[i]) else 0 for i in range(len(y_valid))])/len(y_valid)\n\n valid_loss.append(valid_epoch_loss)\n valid_acc.append(valid_accuracy)\n\n\n print(\"Epoch:\", i, \"Train Accuracy|Loss:\", train_accuracy,\"| \", train_epoch_loss, \"~|~ Valid: \", valid_accuracy, \" | \", valid_epoch_loss)\n if EARLY_STOP:\n if valid_epoch_loss > best_loss and patience >= EARLY_STOP_EPOCH:\n return train_acc, valid_acc, train_loss, valid_loss, best_model\n elif valid_epoch_loss > best_loss and patience < EARLY_STOP_EPOCH:\n patience += 1\n else:\n patience = 0\n if valid_epoch_loss < best_loss:\n best_loss = valid_epoch_loss\n best_accuracy = valid_accuracy\n best_model = copy.deepcopy(model)\n\n loss = valid_epoch_loss\n\n \n best_model = model \n return train_acc, valid_acc, train_loss, valid_loss, best_model" ]
[ "0.7067374", "0.6367169", "0.63666576", "0.62751734", "0.62707525", "0.6263812", "0.62565476", "0.6230025", "0.62205124", "0.62147105", "0.6212505", "0.6206183", "0.6205297", "0.61582935", "0.61562765", "0.61477864", "0.6143865", "0.6137806", "0.6127682", "0.612734", "0.61254925", "0.61239237", "0.61190796", "0.6115356", "0.61108977", "0.6110455", "0.61098933", "0.6102682", "0.61014426", "0.61013824" ]
0.65185124
1
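The per-record metadata above names a triplet objective over (query, document, negatives), and each record also carries negative_scores, document_score and document_rank. The short Python sketch below is not part of the dataset; it only illustrates one way such a record could be consumed. The record dict is abbreviated (snippets truncated, only two negatives kept, values copied from the record just above), and the reading of document_rank as "number of negatives that outscore the positive document" is an assumption that happens to be consistent with the rows shown on this page, not something the dump documents.

record = {
    "query": "Attempt to train a neural network to predict the satisfaction probability of a continuously defined environment.",
    "document": "def run(): ...",  # positive code snippet, truncated here
    "negatives": ["def trainNet(): ...", "def train(): ..."],  # hard negatives, truncated here
    "negative_scores": [0.7067374, 0.6367169],  # scores of the two kept negatives
    "document_score": 0.65185124,
    "document_rank": 1,
}

# Triplets for the (query, document, negatives) objective named in the metadata.
triplets = [(record["query"], record["document"], neg) for neg in record["negatives"]]

# Assumed reading of document_rank: how many negatives outscore the positive document.
assumed_rank = sum(score > record["document_score"] for score in record["negative_scores"])
assert assumed_rank == record["document_rank"]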
Draws a wheel of radius 1, centered at the origin, in the xy plane
def draw_wheel(): outer_radius = 1 thickness = .4 if wireframe: glutWireTorus(thickness,outer_radius - thickness,8,8) else: glutSolidTorus(thickness,outer_radius - thickness,8,8) glPushAttrib(GL_CURRENT_BIT) glPushAttrib(GL_LIGHTING_BIT) glDisable(GL_LIGHTING) glColor3f(0,0,0) glutWireTorus(thickness+.01,outer_radius - thickness + 0.005,8,8) glPopAttrib() glPopAttrib()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wheel():\n wheel_pos = read_npy_file('wheel.position.npy')\n wheel_timestamps = read_npy_file('wheel.timestamps.npy')\n wheel_rate = get_rate(wheel_timestamps)\n\n wheel_ts = TimeSeries(\n name='wheel_position',\n starting_time=wheel_timestamps[0, 1],\n rate=wheel_rate,\n data=np.ravel(wheel_pos),\n unit='mm',\n conversion=0.135,\n description='The position reading of the rotary encoder attached to '\n 'the rubber wheel that the mouse pushes left and right '\n 'with his forelimbs.',\n comments='The wheel has radius 31 mm and 1440 ticks per revolution, '\n 'so multiply by 2*pi*r/tpr=0.135 to convert to millimeters. '\n 'Positive velocity (increasing numbers) correspond to clockwise '\n 'turns (if looking at the wheel from behind the mouse), i.e. '\n 'turns that are in the correct direction for stimuli presented '\n 'to the left. Likewise negative velocity corresponds to right choices.'\n )\n nwb_file.add_acquisition(wheel_ts)", "def draw_car():\r\n\twheel_radius = .5\r\n\twheel_thickness = .4\r\n\r\n\tglPushMatrix()\r\n\r\n\t# shift the car up so the base lies at the origin\r\n\tglTranslatef(0,wheel_radius,0)\r\n\t\r\n\tdraw_car_body()\r\n\r\n\t# draw the car wheels\r\n\t# assume the car is facing down the -z axis\r\n\t# front left, front right, back left, back right\r\n\tww = wheel_thickness/2\r\n\twheel_centers = [(-.5-ww,0,-1),(.5+ww,0,-1),(-.5-ww,0,1),(.5+ww,0,1)]\r\n\tfor i in range(4):\r\n\t\tglPushMatrix()\r\n\t\tapply(glTranslatef,wheel_centers[i])\r\n\t\tglRotatef(90,0,1,0)\r\n\t\tglScalef(wheel_radius,wheel_radius,wheel_radius)\r\n\t\tdraw_wheel()\r\n\t\tglPopMatrix()\r\n\r\n\tglPopMatrix()", "def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)", "def side_wheel_from_axis():", "def create_wheel(sectors):\n # Define the size of the canvas and the radius of the wheel\n canvas_width = 500\n canvas_height = 500\n wheel_radius = min(canvas_width, canvas_height) * 0.4\n rgb_range = range(0, 256)\n\n # Create a tkinter window and canvas\n root = tk.Tk()\n canvas = tk.Canvas(root, width=canvas_width, height=canvas_height)\n canvas.pack()\n\n # Calculate the angle for each sector\n num_sectors = len(sectors)\n num_colors = num_sectors\n sector_angle = 360 / num_sectors\n get_colors = lambda n: [\"#%06x\" % random.randint(0, 0xFFFFFF) for _ in range(n)]\n colors = get_colors(num_colors)\n\n # Draw the sectors of the wheel\n start_angle = 0\n for i in range(num_sectors):\n end_angle = start_angle + sector_angle\n center_x = canvas_width / 2 + (wheel_radius / 2) * math.cos(math.radians(start_angle + sector_angle / 2))\n center_y = canvas_height / 2 - (wheel_radius / 2) * math.sin(math.radians(start_angle + sector_angle / 2))\n canvas.create_arc((canvas_width / 2 - wheel_radius, canvas_height / 2 - wheel_radius,\n canvas_width / 2 + wheel_radius, canvas_height / 2 + wheel_radius),\n start=start_angle, extent=sector_angle, fill=colors[i], outline='black',\n width=2, style='pie')\n canvas.create_text(center_x, center_y, text=sectors[i], font=('Arial', 12, 'bold'))\n start_angle = end_angle\n\n # Draw an arrow pointing to the center of the circle\n # Calculate the angle for the selected sector\n selected_sector = sectors.index(spinit(sectors))\n selected_angle = selected_sector * sector_angle + sector_angle / 2\n\n # Calculate the coordinates of the base of the arrow\n arrow_base_x = canvas_width / 2\n arrow_base_y = canvas_height / 2\n\n # Calculate 
the coordinates of the tip of the arrow\n arrow_tip_x = arrow_base_x + (wheel_radius * 0.4) * math.sin(math.radians(selected_angle))\n arrow_tip_y = arrow_base_y - (wheel_radius * 0.4) * math.cos(math.radians(selected_angle))\n\n # Draw the arrow\n canvas.create_line(arrow_base_x, arrow_base_y, arrow_tip_x, arrow_tip_y, fill='red', width=10, arrow='last',\n arrowshape=(15, 20, 5))\n\n # Schedule the window to close after 5 seconds\n root.after(3000, root.destroy)\n\n # Start the tkinter event loop\n root.mainloop()", "def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)", "def wheel(ticks):\n m = PyMouse()\n m.scroll(ticks)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)", "def DrawCircle(self, center, radius, color, drawwidth=1):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, drawwidth)", "def wheel(self, start=0, end=0):\n\t\tif end == 0: end = self.leds\n\t\tsize = end - start\n\t\tself.wheelOffset += 1\n\t\tif self.wheelOffset == 384: self.wheelOffset = 0;\n\t\tfor i in range(size):\n\t\t\tcolor = (i * (384 / size) + self.wheelOffset) % 384;\n\t\t\tif color < 128:\n\t\t\t\tr = 127 - color % 128\n\t\t\t\tg = color % 128\n\t\t\t\tb = 0\n\t\t\telif color < 256:\n\t\t\t\tg = 127 - color % 128\n\t\t\t\tb = color % 128\n\t\t\t\tr = 0\n\t\t\telse:\n\t\t\t\tb = 127 - color % 128\n\t\t\t\tr = color % 128\n\t\t\t\tg = 0\n\t\t\tself.set(start + i, r, g, b)\n\t\t\tprint r,',',g,',',b\n\t\tself.update()", "def wheel(self,pos):\n\t\tif pos < 85:\n\t\t\treturn Color(pos * 3, 255 - pos * 3, 0)\n\t\telif pos < 170:\n\t\t\tpos -= 85\n\t\t\treturn Color(255 - pos * 3, 0, pos * 3)\n\t\telse:\n\t\t\tpos -= 170\n\t\t\treturn Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(self,pos):\n if pos < 85:\n return NmctPixel.Color(pos * 3, 255 - pos * 3, 0)\n elif 
pos < 170:\n pos -= 85\n return NmctPixel.Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return NmctPixel.Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\r\n if pos < 85:\r\n return Color(pos * 3, 255 - pos * 3, 0)\r\n elif pos < 170:\r\n pos -= 85\r\n return Color(255 - pos * 3, 0, pos * 3)\r\n else:\r\n pos -= 170\r\n return Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n\tif pos < 85:\n\t\treturn Color(pos * 3, 255 - pos * 3, 0)\n\telif pos < 170:\n\t\tpos -= 85\n\t\treturn Color(255 - pos * 3, 0, pos * 3)\n\telse:\n\t\tpos -= 170\n\t\treturn Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n\tif pos < 85:\n\t\treturn Color(pos * 3, 255 - pos * 3, 0)\n\telif pos < 170:\n\t\tpos -= 85\n\t\treturn Color(255 - pos * 3, 0, pos * 3)\n\telse:\n\t\tpos -= 170\n\t\treturn Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n\tif pos < 85:\n\t\treturn Color(pos * 3, 255 - pos * 3, 0)\n\telif pos < 170:\n\t\tpos -= 85\n\t\treturn Color(255 - pos * 3, 0, pos * 3)\n\telse:\n\t\tpos -= 170\n\t\treturn Color(0, pos * 3, 255 - pos * 3)", "def wheel(pos):\n if pos < 85:\n return Color(255, 0, 0)\n elif pos < 170:\n pos -= 85\n return Color(255, 0, 255)\n #return Color(0, 255, 255 - pos * 3)\n else:\n pos -= 170\n return Color(255, 0, 0)\n #return Color(0, 255, 27)", "def wdraw_circle(self, wx, wy, dradius, fill, outline):\r\n dx, dy = self.w_to_d(wx, wy)\r\n self.canvas.create_oval(dx - dradius, dy - dradius, dx + dradius, dy + dradius, fill=fill, outline=outline)", "def front_wheel_from_axis():", "def draw_full_circle(x, y, radius):\n iterations = int(2 * radius * pi)\n s = sin(2 * pi / iterations)\n c = cos(2 * pi / iterations)\n\n dx, dy = radius, 0.\n\n glBegin(GL_TRIANGLE_FAN)\n glVertex2f(x, y)\n for _ in range(iterations + 1):\n glVertex2f(x + dx, y + dy)\n dx, dy = (dx * c + dy * s), (dy * c - dx * s)\n glEnd()", "def wheel(pos):\n if pos < 85:\n return Color(pos * 3, 255 - pos * 3, 0)\n elif pos < 170:\n pos -= 85\n return Color(255 - pos * 3, 0, pos * 3)\n else:\n pos -= 170\n return Color(0, pos * 3, 255 - pos * 3)", "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(c):\n turtle.circle(c.radius)" ]
[ "0.6423294", "0.6398033", "0.636496", "0.6214387", "0.6188329", "0.6164647", "0.61628", "0.61493146", "0.6137537", "0.61131966", "0.60819876", "0.6075274", "0.60751307", "0.60751307", "0.60751307", "0.60751307", "0.60751307", "0.60751307", "0.60696834", "0.60660064", "0.60341024", "0.60341024", "0.60341024", "0.5984614", "0.59051925", "0.5895419", "0.5874974", "0.58581805", "0.5854651", "0.5854651" ]
0.7546296
0
Draws the car body. It is a 1x1x2 cube with its base at the origin.
def draw_car_body(): # draw the car body glPushMatrix() glTranslatef(0,.5,0) glScalef(1,1,2) if wireframe: glutWireCube(1) else: glutSolidCube(1) # draw the wireframe outer shell glPushAttrib(GL_CURRENT_BIT) glPushAttrib(GL_LIGHTING_BIT) glDisable(GL_LIGHTING) glColor3f(0,0,0) glutWireCube(1.001) glPopAttrib() glPopAttrib() glPopMatrix()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_car():\r\n\twheel_radius = .5\r\n\twheel_thickness = .4\r\n\r\n\tglPushMatrix()\r\n\r\n\t# shift the car up so the base lies at the origin\r\n\tglTranslatef(0,wheel_radius,0)\r\n\t\r\n\tdraw_car_body()\r\n\r\n\t# draw the car wheels\r\n\t# assume the car is facing down the -z axis\r\n\t# front left, front right, back left, back right\r\n\tww = wheel_thickness/2\r\n\twheel_centers = [(-.5-ww,0,-1),(.5+ww,0,-1),(-.5-ww,0,1),(.5+ww,0,1)]\r\n\tfor i in range(4):\r\n\t\tglPushMatrix()\r\n\t\tapply(glTranslatef,wheel_centers[i])\r\n\t\tglRotatef(90,0,1,0)\r\n\t\tglScalef(wheel_radius,wheel_radius,wheel_radius)\r\n\t\tdraw_wheel()\r\n\t\tglPopMatrix()\r\n\r\n\tglPopMatrix()", "def draw_car(self):\n a = self.h / 50\n ellipse(screen, BLACK, (self.x - 15 * a, self.y + 35 * a, 30 * a, 10 * a))\n rect(screen, LIGHT_BLUE, (self.x, self.y, self.dir * 260 * a, self.h))\n rect(screen, LIGHT_BLUE, (self.x + self.dir * 40 * a, self.y - 40 * a, self.dir * 130 * a, 40 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 50 * a, self.y - 30 * a, self.dir * 45 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 120 * a, self.y - 30 * a, self.dir * 48 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 248 * a, self.y + 2 * a, self.dir * 10 * a, 10 * a))\n circle(screen, BLACK, (self.x + self.dir * int(220 * a), self.y + int(50 * a)), int(25 * a))\n circle(screen, BLACK, (self.x + self.dir * int(50 * a), self.y + int(50 * a)), int(25 * a))", "def render(self, screen):\n x,y = self.getBallPos()\n pygame.draw.circle(screen, (255, 255, 255), (x, y), self.RADIUS)", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def draw_body(node, body):\r\n\t\tx,y,z = body.getPosition()\r\n\t\tnode.setPosition(vector3df(x,y,z)*10)\r\n\t\tw,xx,yy,zz = body.getQuaternion()\r\n\t\tnode.setRotation(vector3df(degrees(xx), degrees(yy), degrees(zz)))\r\n\t\tif body.shape == \"box\":\r\n\t\t\tsx,sy,sz = body.boxsize\r\n\t\t\tnode.setScale(vector3df(sx,sy,sz))", "def draw_cube(self, window):\n size = pygame.display.get_surface().get_size()\n width = (size[0]/4)\n\n window.fill((000,000,000))\n\n self.draw_face(\"U\", window, (0 + (width*1), 0 + (width*0)), width)\n self.draw_face(\"L\", window, (0 + (width*0), 0 + (width*1)), width)\n self.draw_face(\"F\", window, (0 + (width*1) * 1, 0 + (width*1)), width)\n self.draw_face(\"R\", window, (0 + (width*2), 0 + (width*1)), width)\n self.draw_face(\"B\", window, (0 + (width*3), 0 + (width*1)), width)\n self.draw_face(\"D\", window, (0 + 
(width*1), 0 + (width*2)), width)\n\n pygame.display.update()", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, BULLET_RADIUS, BULLET_COLOR)", "def draw(self):\n arcade.draw_circle_filled(self.center.x, self.center.y, BALL_RADIUS, BALL_COLOR)\n return", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def draw(self):\r\n arcade.draw_rectangle_filled(self.center.x, self.center.y, self.radius, self.radius, TARGET_SAFE_COLOR)", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def play_moving(self, cr, phase):\n width, height = self.width, self.height\n delta = height * 0.05 * cos(phase * pi * 2)\n\n cr.save()\n cr.set_line_join(cairo.LINE_JOIN_BEVEL)\n # Draw feets\n self._draw_feet(cr, width * 0.3, height * 0.8 + delta)\n self._draw_feet(cr, width * (1 - 0.3), height * 0.8 - delta, -1)\n # Draw body\n self._draw_cylinder(cr, width * 0.2, height * 
0.8, height * 0.3, (0, 0, 0.7))\n # Draw head\n self._draw_cylinder(cr, width * 0.2, height * 0.5, height * 0.2, (0.7, 0.7, 0.7))\n # Draw eyes\n self._draw_eyes(cr)\n cr.restore()", "def draw(self, screen):\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)", "def play_moving(self, cr, phase):\n width, height = self.width, self.height\n delta = height * 0.05 * cos(phase * pi * 2)\n\n cr.save()\n cr.set_line_join(cairo.LINE_JOIN_BEVEL)\n # Draw feets\n self._draw_feet(cr, width * 0.3, height * 0.8 + delta)\n self._draw_feet(cr, width * (1 - 0.3), height * 0.8 - delta, -1)\n # Draw body\n self._draw_cylinder(cr, width * 0.2, height * 0.8, height * 0.3, (0.2, 0.2, 0.2))\n # Draw head\n self._draw_cylinder(cr, width * 0.2, height * 0.5, height * 0.2, (0.3, 0.3, 0.3))\n # Draw eyes\n self._draw_eyes(cr)\n cr.restore()", "def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()", "def draw(self):\n self.screen.fill(pygame.Color(0,0,0))\n for brick in self.model.bricks:\n pygame.draw.rect(self.screen, brick.color, pygame.Rect(brick.x,brick.y,brick.width,brick.height))\n pygame.draw.rect(self.screen, pygame.Color(255,255,255), pygame.Rect(self.model.paddle.x,self.model.paddle.y,self.model.paddle.width,self.model.paddle.height))\n pygame.draw.ellipse(self.screen, pygame.Color(128,128,128),(self.model.ball.x-self.model.ball.r, self.model.ball.y-self.model.ball.r, 2*self.model.ball.r,2*self.model.ball.r))\n pygame.display.update()", "def draw():\n screen.fill((0, 0, 0))\n alien.draw()", "def draw(self, screen):\n\t\tpygame.draw.circle(screen, self.color, self.pos, self.radius)", "def draw(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.cannon.draw(self.screen)\n self.objects.draw(self.screen)", "def draw(self):\n x = self.displacement.x + self.physics_canvas.origin_x\n y = self.displacement.y + self.physics_canvas.origin_y\n self.canvas_id = self.physics_canvas.canvas.create_rectangle(x-10,y+10,x+10,y-10, fill='black') # e.g.", "def drawCube(self):\r\n glBegin(GL_QUADS);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(1.0, 1.0, -1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(1.0, 1.0, 1.0);\r\n 
glTexCoord2f(0.0, 0.0);\r\n glVertex3f(1.0, -1.0, 1.0);\r\n glTexCoord2f(0.0, 0.0);\r\n glVertex3f(-1.0, -1.0, -1.0);\r\n glTexCoord2f(1.0, 0.0);\r\n glVertex3f(-1.0, -1.0, 1.0);\r\n glTexCoord2f(1.0, 1.0);\r\n glVertex3f(-1.0, 1.0, 1.0);\r\n glTexCoord2f(0.0, 1.0);\r\n glVertex3f(-1.0, 1.0, -1.0);\r\n glEnd()", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x,\n self.center.y,\n self.width,\n self.height,\n arcade.color.WHITE)", "def render(self,screen):\n pygame.draw.circle(screen, (255,0,0), self.circlePos.int(),self.circleRad,0)\n tipPt = self.circlePos + 5 * self.circleVel.normalized()\n perpVector = math3d.VectorN(-self.circleVel[1], self.circleVel[0]).normalized()\n rsidePt = self.circlePos + 5 * perpVector\n lsidePt = self.circlePos + 5 *-perpVector\n\n pygame.draw.line(screen, (255,255,255), self.circlePos, tipPt)\n pygame.draw.line(screen, (255,255,255), self.circlePos, rsidePt)\n pygame.draw.line(screen,(255,255,255), self.circlePos, lsidePt)\n pygame.draw.polygon(screen,(255,255,255),(tipPt,rsidePt,lsidePt),3)", "def draw_ball(self):\n circle(screen, self.color, (self.x, self.y), self.r)", "def draw_cube(self, vec):\n # TOP FACE\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n # BOTTOM FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # FRONT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # BACK FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # RIGHT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # LEFT FACE\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glEnd()", "def render(self, game):\n pygame.draw.circle(game.screen,\n self.colour,\n (int(self.x), int(self.y)), self.r)", "def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)", "def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )", "def draw(self):\n arcade.draw_circle_filled(self.position_x, self.position_y, self.radius,self.player_color)" ]
[ "0.77651775", "0.71250576", "0.6589624", "0.6474782", "0.6466148", "0.64601725", "0.64139956", "0.6389157", "0.6299738", "0.6197127", "0.61908185", "0.6183466", "0.61715764", "0.6170825", "0.616894", "0.6168042", "0.6151615", "0.6133578", "0.61240333", "0.61225885", "0.6109048", "0.6097265", "0.608535", "0.60634094", "0.60570043", "0.60552174", "0.6030057", "0.5999211", "0.599263", "0.59674454" ]
0.85635704
0
Draws a car. The 'car' is a 1x1x2 cube with its base at the origin, with wheels at the four corners.
def draw_car():\n\twheel_radius = .5\n\twheel_thickness = .4\n\tglPushMatrix()\n\t# shift the car up so the base lies at the origin\n\tglTranslatef(0,wheel_radius,0)\n\tdraw_car_body()\n\t# draw the car wheels\n\t# assume the car is facing down the -z axis\n\t# front left, front right, back left, back right\n\tww = wheel_thickness/2\n\twheel_centers = [(-.5-ww,0,-1),(.5+ww,0,-1),(-.5-ww,0,1),(.5+ww,0,1)]\n\tfor i in range(4):\n\t\tglPushMatrix()\n\t\tapply(glTranslatef,wheel_centers[i])\n\t\tglRotatef(90,0,1,0)\n\t\tglScalef(wheel_radius,wheel_radius,wheel_radius)\n\t\tdraw_wheel()\n\t\tglPopMatrix()\n\tglPopMatrix()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_car(self):\n a = self.h / 50\n ellipse(screen, BLACK, (self.x - 15 * a, self.y + 35 * a, 30 * a, 10 * a))\n rect(screen, LIGHT_BLUE, (self.x, self.y, self.dir * 260 * a, self.h))\n rect(screen, LIGHT_BLUE, (self.x + self.dir * 40 * a, self.y - 40 * a, self.dir * 130 * a, 40 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 50 * a, self.y - 30 * a, self.dir * 45 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 120 * a, self.y - 30 * a, self.dir * 48 * a, 30 * a))\n rect(screen, LIGHT_GREY, (self.x + self.dir * 248 * a, self.y + 2 * a, self.dir * 10 * a, 10 * a))\n circle(screen, BLACK, (self.x + self.dir * int(220 * a), self.y + int(50 * a)), int(25 * a))\n circle(screen, BLACK, (self.x + self.dir * int(50 * a), self.y + int(50 * a)), int(25 * a))", "def draw_car_body():\r\n\t# draw the car body\t\r\n\tglPushMatrix()\r\n\tglTranslatef(0,.5,0)\r\n\tglScalef(1,1,2)\r\n\tif wireframe:\r\n\t\tglutWireCube(1)\r\n\telse:\r\n\t\tglutSolidCube(1)\r\n\t\t# draw the wireframe outer shell\r\n\t\tglPushAttrib(GL_CURRENT_BIT)\r\n\t\tglPushAttrib(GL_LIGHTING_BIT)\r\n\t\tglDisable(GL_LIGHTING)\r\n\t\tglColor3f(0,0,0)\r\n\t\tglutWireCube(1.001)\t\r\n\t\tglPopAttrib()\r\n\t\tglPopAttrib()\r\n\tglPopMatrix()", "def new_car(self):\r\n random_num = random.randint(1, 3)\r\n if random_num == 1:\r\n new_car = Turtle('square')\r\n new_car.shapesize(stretch_wid=1, stretch_len=2)\r\n new_car.penup()\r\n new_car.color(random.choice(COLOURS))\r\n random_y = random.randint(-240, 270)\r\n new_car.goto(280, random_y)\r\n self.all_cars.append(new_car)", "def red_car(self):\n self.color = \"red\"\n self.wheels = 4", "def get_car(self):\n car = Car()\n\n body = self.__builder.get_body()\n car.set_body(body)\n\n engine = self.__builder.get_engine()\n car.set_engine(engine)\n\n i = 0\n while i < 4:\n wheel = self.__builder.get_wheel()\n car.attach_wheel(wheel)\n i += 1\n\n return car", "def animate_car():\n\n car_lines = []\n tire_lines = []\n # move car along\n # glPushMatrix()\n offset = [car.position.x, car.position.y, car.position.z]\n transformation_matrix = push_translation_rotation_matrix(offset, 0)\n # glTranslated(car.position.x, car.position.y, car.position.z)\n car_lines.extend(draw_object(loadCar(), transformation_matrix)) # draw car\n # drawCar()\n\n # translate and rotate tires\n for tire in car.tires:\n offset = [tire.x, tire.y, tire.z]\n transformation_matrix = push_translation_rotation_matrix(offset, car.tire_rotation,\n rot_type=\"z\")\n tire_lines.extend(draw_object(loadTire(), transformation_matrix))\n\n pop_matrix()\n pop_matrix()\n\n return car_lines, tire_lines", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)", "def drawCars(self):\n for car in self.cars:\n if car.aliveForFrames >= ALIVE_THRESHOLD:\n msg = 'ID: {0:>2}\\n'.format(car.id)\n msg += 'conf:{0:.2f}%\\n'.format(car.displayedConfidence * 100)\n msg += 'active: {} frames'.format(car.aliveForFrames - car.aliveForFrames % 5)\n self.car_detector.draw_boxes(self.image, tuple(car.box), msg)\n\n return self.image", "def draw(self, screen):\n\t\tpygame.draw.circle(screen, self.color, self.pos, self.radius)", "def draw(self, surface):\n color = pygame.Color(255, 255, 255)\n pygame.draw.circle(surface, color, self.position, Molecule.radius, 2)", "def draw(self):\n arcade.draw_circle_filled(self.position_x, self.position_y, self.radius,self.player_color)", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, BULLET_RADIUS, BULLET_COLOR)", "def 
car(img, x, y):\n gameDisplay.blit(img, (x, y)) # blit display the image", "def draw(self):\n pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)),\n self.radius)", "def diag(self):\n print(\"This car is\", self.color, \"and has wheels\", self.wheels)", "def setup(self):\n pygame.init()\n screen = self.screen\n screen.fill((240,240,240)) \n inter_car_gap = 10\n \n for car in self.model.board.cars.values():\n \n # get long and short side of the car\n long_side = int(((self.size*(car.length)) - inter_car_gap))\n short_side = self.size - inter_car_gap \n\n # get car x and y coordinates in order to place car on the board\n position = self.model.get_car_pos(car)\n row, column = position[0], position[1]\n x = ((column)*self.size + (inter_car_gap / 2))\n y = ((row)*self.size + (inter_car_gap / 2))\n\n # place red car on the board\n if car.cid == 'X':\n image = pygame.Surface((long_side, short_side))\n sprite = pygame.draw.rect(image,self.red,(0,0,((self.size*(car.length))-inter_car_gap),(self.size - inter_car_gap)))\n \n # place trucks on the board\n elif car.length > 2:\n if car.orientation == \"H\":\n image = pygame.Surface((long_side, short_side))\n sprite = pygame.draw.rect(image, self.green, (0, 0, ((self.size*car.length)-inter_car_gap), (self.size - inter_car_gap)))\n else:\n image = pygame.Surface((short_side, long_side))\n sprite = pygame.draw.rect(image, self.green, (0, 0 ,(self.size - inter_car_gap), ((self.size*car.length) - inter_car_gap)))\n \n # place cars on the board\n else:\n if car.orientation == \"H\":\n image = pygame.Surface((long_side, short_side))\n sprite = pygame.draw.rect(image,self.blue,(0,0,((self.size*car.length) - inter_car_gap),(self.size - inter_car_gap)))\n else:\n image = pygame.Surface((short_side, long_side))\n sprite = pygame.draw.rect(image,self.blue,(0,0,(self.size - inter_car_gap),((self.size * car.length) - inter_car_gap)))\n \n screen.blit(image,(x,y))\n\n pygame.display.update()", "def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)", "def play_moving(self, cr, phase):\n width, height = self.width, self.height\n delta = height * 0.05 * cos(phase * pi * 2)\n\n cr.save()\n cr.set_line_join(cairo.LINE_JOIN_BEVEL)\n # Draw feets\n self._draw_feet(cr, width * 0.3, height * 0.8 + delta)\n self._draw_feet(cr, width * (1 - 0.3), height * 0.8 - delta, -1)\n # Draw body\n self._draw_cylinder(cr, width * 0.2, height * 0.8, height * 0.3, (0.2, 0.2, 0.2))\n # Draw head\n self._draw_cylinder(cr, width * 0.2, height * 0.5, height * 0.2, (0.3, 0.3, 0.3))\n # Draw eyes\n self._draw_eyes(cr)\n cr.restore()", "def play_moving(self, cr, phase):\n width, height = self.width, self.height\n delta = height * 0.05 * cos(phase * pi * 2)\n\n cr.save()\n cr.set_line_join(cairo.LINE_JOIN_BEVEL)\n # Draw feets\n self._draw_feet(cr, width * 0.3, height * 0.8 + delta)\n self._draw_feet(cr, width * (1 - 0.3), height * 0.8 - delta, -1)\n # Draw body\n self._draw_cylinder(cr, width * 0.2, height * 0.8, height * 0.3, (0, 0, 0.7))\n # Draw head\n self._draw_cylinder(cr, width * 0.2, height * 0.5, height * 0.2, (0.7, 0.7, 0.7))\n # Draw eyes\n self._draw_eyes(cr)\n cr.restore()", "def render(self, screen):\n x,y = self.getBallPos()\n pygame.draw.circle(screen, (255, 255, 255), (x, y), self.RADIUS)", "def draw( self ):\n\t\t\t\n\t\ttransposition = lambda point: (point[0] + WINDOW_X, 
WINDOW_Y - point[1])\n\t\t\t \n\t\tx, y = transposition( self.position.xy )\n\t\tpygame.draw.circle(self.screen, self.color, ( int(x + 0.5), int(y + 0.5) ), self.r)", "def _draw_character(self, img, x, y, theta, view_x=0, view_y=0):\n # Rotate the image and get its dimensions.\n rotated = pygame.transform.rotate(img, np.degrees(theta))\n rect = rotated.get_rect()\n\n # Calculate the global position of the corner of the car within the map\n x_global = x - rect.width / 2.0\n y_global = y - rect.height / 2.0\n\n # The car should be displayed relative to the current view.\n x = x_global - view_x\n y = y_global - view_y\n\n self.surface.blit(rotated, (int(round(x)), int(round(y))))", "def render(self, game):\n pygame.draw.circle(game.screen,\n self.colour,\n (int(self.x), int(self.y)), self.r)", "def car(img,x, y):\n gameDisplay.blit(img, (x, y)) # blit display the image", "def drawFace(win, winW, winH):\n face = Circle(Point(winW/2, winH/2), min(winW, winH)*11/24)\n face.setOutline(\"black\")\n face.setFill(\"burlywood\")\n face.draw(win)", "def draw(self):\n arcade.draw_circle_filled(self.center.x, self.center.y, BALL_RADIUS, BALL_COLOR)\n return", "def draw_wheel():\r\n\touter_radius = 1\r\n\tthickness = .4\r\n\tif wireframe:\r\n\t\tglutWireTorus(thickness,outer_radius - thickness,8,8)\r\n\telse:\r\n\t\tglutSolidTorus(thickness,outer_radius - thickness,8,8)\r\n\t\tglPushAttrib(GL_CURRENT_BIT)\r\n\t\tglPushAttrib(GL_LIGHTING_BIT)\r\n\t\tglDisable(GL_LIGHTING)\r\n\t\tglColor3f(0,0,0)\r\n\t\tglutWireTorus(thickness+.01,outer_radius - thickness + 0.005,8,8)\t\r\n\t\tglPopAttrib()\r\n\t\tglPopAttrib()", "def draw(self, screen):\n screen.blit(self.rotate_surface, [self.x_pos, self.y_pos])\n self.draw_radar(screen)", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def __make_carriage(self):\n # Create base rectangle\n length = self.parameters['carriage_length']\n width = self.parameters['carriage_width']\n height = self.parameters['carriage_height']\n carriage = fso.Box(x=length, y=width, z=height)\n\n # Subtract slide from carraige\n slide_width = self.parameters['slide_width'] + 2*self.parameters['slide_tolerance']\n slide_height = self.parameters['slide_height'] + 2*self.parameters['slide_tolerance']\n slide_negative = fso.Box(x=2*length, y=slide_width, z=slide_height)\n carriage = carriage - slide_negative\n\n # Create mounting holes\n radius = 0.5*self.parameters['carriage_screw_size']\n base_hole = fso.Cylinder(r=radius,l=2*height)\n hole_list = []\n for i in (-1,1):\n 
for j in (-1,1):\n xpos = i*0.5*self.parameters['carriage_screw_dL']\n ypos = j*0.5*self.parameters['carriage_screw_dW']\n hole = base_hole.copy()\n hole.translate([xpos,ypos,0])\n hole_list.append(hole)\n # Remove hole material\n # print hole_list\n carriage -= hole_list\n carriage.set_color(self.carriage_color,recursive=True)\n self.carriage = carriage" ]
[ "0.80520225", "0.7438217", "0.6300509", "0.6198885", "0.6186315", "0.60886157", "0.6057705", "0.60341805", "0.6010062", "0.59887075", "0.5942513", "0.59070784", "0.5899582", "0.5894189", "0.5891429", "0.5858376", "0.58501995", "0.58252805", "0.58222425", "0.58174217", "0.5805992", "0.5783291", "0.5781633", "0.5762845", "0.57514054", "0.5749091", "0.5746716", "0.573219", "0.57291096", "0.5704371" ]
0.8772034
0
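Note on the draw_car document above: it calls the Python 2-only builtin apply(); under Python 3 the same call is written with argument unpacking. A minimal runnable sketch under that assumption — draw_car_body() and draw_wheel() are taken to be the helper functions that appear among this record's negatives, and PyOpenGL is assumed for the GL bindings:

from OpenGL.GL import (glPushMatrix, glPopMatrix, glTranslatef, glRotatef, glScalef)

def draw_car_py3():
    # Same logic as the draw_car document, spelled for Python 3.
    wheel_radius = .5
    wheel_thickness = .4
    glPushMatrix()
    glTranslatef(0, wheel_radius, 0)   # lift the body so its base sits at the origin
    draw_car_body()                    # assumed helper (see this record's negatives)
    ww = wheel_thickness / 2
    # front left, front right, back left, back right (car faces down the -z axis)
    wheel_centers = [(-.5 - ww, 0, -1), (.5 + ww, 0, -1),
                     (-.5 - ww, 0, 1), (.5 + ww, 0, 1)]
    for center in wheel_centers:
        glPushMatrix()
        glTranslatef(*center)          # replaces apply(glTranslatef, center)
        glRotatef(90, 0, 1, 0)         # turn the torus wheel to face sideways
        glScalef(wheel_radius, wheel_radius, wheel_radius)
        draw_wheel()                   # assumed helper (see this record's negatives)
        glPopMatrix()
    glPopMatrix()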
Adds an element at the end of the list.
def addLast(self, element):\n    if element is None:\n        raise TypeError('The input element is NoneType')\n    newNode = Node(element)\n    if self.__nelems == 0:\n        self.__head = self.__tail = newNode\n    else:\n        self.__tail.setNext(newNode)\n        self.__tail = newNode\n    self.__nelems += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, elem):\n self.add_last(elem)", "def add_last(self, elem):\n if self.is_empty():\n self.head = self.tail = self.Node(elem, None, None)\n else:\n self.tail.nxt = self.Node(elem, self.tail, None)\n self.tail = self.tail.nxt\n\n self.size += 1", "def add_last(self, e):\n self._insert_between(e, self._tail._prev, self._tail)", "def add_last(self, value):\n # Checking for empty list\n if self.head is None:\n self.add_first(value)\n return\n\n # Add new node\n self._add_last(self.head, value)", "def _add_last(cls, node, value):\n # Check if element is the last element\n if node.next_ is None:\n node.next_ = Node(value)\n return\n\n # Recursively go to next node\n cls._add_last(node.next_, value)", "def add_last(self, e):\n newest = self._Node(e, self._tail, None) # node will be new tail node, prev point to old tail\n if self.is_empty():\n self._head = newest # special case: previously empty\n else:\n self._tail._next = newest\n self._tail = newest # update reference to tail node\n self._size += 1", "def __add(self, element):\n\t\tif element.value == None:\n\t\t\telement.value = self._last_value\n\t\t\tself._last_value += 1\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself._last_value = element.value + 1\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\n\t\tself.elements.append(element)", "def add_last(self, data):\n node = self.Node(data, None)\n\n if self.is_empty():\n self.head = node\n else:\n tail = self.getNode(self.list_size - 1)\n tail.next_node = node\n\n self.list_size += 1", "def append(self, element):\r\n self.elements.append(element)", "def add_last(self, node_to_add):\n if self.head == None:\n self.head = node_to_add\n return\n node = self.head\n # while node.next is not None:*\n while node.next is not None:\n node = node.next\n node.next = node_to_add", "def add_to_end(self, cargo):\r\n if self.size() == 0: # si la liste est encore vide,\r\n self.add(cargo) # ajouter à la fin correspond au ajouter au début\r\n else: # si la liste contient déjà au moins un noeud (et donc une dernier noeud)\r\n node = self.Node(cargo)\r\n self.__last.set_next(node) # make the current last node point to this new node\r\n self.__last = node # set the last node reference to this new node\r\n self.inc_size() # increment list size by one\r", "def add_last(self, e):\n return self._insert_between(e, self._trailer._prev, self._trailer)", "def __add__(self, element):\r\n self.elements += element", "def append(self, value):\n self.list.append(value)", "def append(self, element):\n temp = Node(element)\n self.size += 1\n if self.isEmpty():\n self.head = temp\n self.tail = temp\n else:\n self.tail.right = temp\n self.tail = temp", "def append(self, element):\n node = Node(element)\n if self.head is None:\n self.head = node\n else:\n cursor = self.head\n while cursor.next is not None:\n cursor = cursor.next\n cursor.next = node\n node.prev = cursor", "def add(self, elem):\n assert self._is_int is False\n self._list.append(elem)", "def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)", "def append(self, item):\n if self.full or self.pre_allocated:\n # overwrite\n self.data[self.cur] = item\n else:\n self.data.append(item)\n if not self.full:\n self.full = self.cur == self.max - 1\n self.cur = (self.cur + 1) % self.max", "def add_last(self, data):\n self.deque.append(data)", "def add(self, element):\n\n if self.style == 'FIFO': # If FIFO, append element to end of list\n self.queue.append(element)\n\n elif self.style == 'LIFO': # If LIFO, append element to front of list\n 
self.queue.insert(0, element)", "def append(self, element):\r\n if self.n == self.capacity:\r\n self.__resize(2*self.capacity)\r\n\r\n self.A[self.n] = element\r\n self.n += 1", "def append(self, x):\n self[len(self):] = [x]", "def append(self, item):\n self.update([item])", "def add_last(self, data):\n # if list empty set head and tail as the new Node\n if self.head is None:\n self.tail = Node(data, next=None)\n self.head = self.tail\n # else set new tail\n else:\n self.tail.next = Node(data, next=None)\n # set the skip back pointer if needed\n if self.head != self.tail:\n if self.tail.skip_back is None:\n self.tail.next.skip_back = self.head\n else:\n self.tail.next.skip_back = self.tail.skip_back.next\n # set the tail to the new one\n self.tail = self.tail.next", "def append(self, value):\n self[self._end] = value\n self._end = (self._end + 1) % self._capacity\n\n if self.full:\n self._begin = (self._begin + 1) % self._capacity\n\n else:\n self._size += 1", "def append(self, item):\n # type: (Any) -> None\n list.append(self, self.ref(item))", "def add_to_tail(self, value):\n\n new_node = ListNode(value)\n\n if self.size == 0: # if list is empty\n self.head = self.tail = new_node # make new_node both head and tail\n\n else:\n self.tail.next = new_node # place new_node after tail\n new_node.prev = self.tail # place current tail before new_node\n self.tail = new_node # replace self.tail\n\n self.size += 1 # increase size of list", "def append(self, item):\n \n n = Node(item)\n current = self.head\n \n # Special case - empty list\n if current is None:\n self.head = n\n else:\n # Find the last node\n while current.get_next() is not None:\n current = current.get_next()\n current.set_next(n)", "def insert_last(self, e):\n self._insert_between(e, self._tail._prev, self._tail)" ]
[ "0.801837", "0.7484905", "0.7440537", "0.7360816", "0.7031695", "0.7012047", "0.6965191", "0.6942367", "0.6787635", "0.67849165", "0.6759148", "0.6757302", "0.67473555", "0.67190033", "0.664839", "0.6612984", "0.6594515", "0.6587128", "0.65869623", "0.657475", "0.65145016", "0.6500609", "0.64982945", "0.6486411", "0.64805037", "0.64538336", "0.64515954", "0.6438057", "0.64332116", "0.64094454" ]
0.7491473
1
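For context on the addLast document above: it presupposes a Node class with a setNext method and a list object that tracks __head, __tail and __nelems. A minimal sketch of that scaffolding — every name outside addLast itself is an assumption made only so the method can be exercised end to end:

class Node:
    def __init__(self, element):
        self.__element = element
        self.__next = None
    def setNext(self, node):
        self.__next = node

class SinglyLinkedList:
    def __init__(self):
        self.__head = None
        self.__tail = None
        self.__nelems = 0
    def addLast(self, element):
        # identical logic to the document: O(1) append at the tail
        if element is None:
            raise TypeError('The input element is NoneType')
        newNode = Node(element)
        if self.__nelems == 0:
            self.__head = self.__tail = newNode
        else:
            self.__tail.setNext(newNode)
            self.__tail = newNode
        self.__nelems += 1

lst = SinglyLinkedList()
lst.addLast('a')
lst.addLast('b')   # 'b' becomes the new tail; the list now holds two nodes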