query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (listlengths 30-30) | negative_scores (listlengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
If votes is not a QuerySet or contains the wrong model, a TypeError is raised | def test_wrong_input_type(self):
with self.assertRaises(TypeError):
votes_to_percentages(['not', 'a', 'queryset'])
with self.assertRaises(TypeError):
votes_to_percentages(Disposable.objects.all()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_empty_votes(self):\n with self.assertRaises(ValueError):\n votes_to_percentages(DisposableVote.objects.none())",
"def vote(request, model, object_id):\n if request.method != 'POST':\n raise Http404\n\n vote_type = request.POST.get('type', None)\n if vote_type == 'up' and auth.can_vote_up(request.user):\n vote_type = Vote.VOTE_UP\n elif vote_type == 'down' and auth.can_vote_down(request.user):\n vote_type = Vote.VOTE_DOWN\n else:\n raise Http404\n\n # TODO Ensure users can't vote on their own posts\n\n obj = get_object_or_404(model, id=object_id, deleted=False, locked=False)\n content_type = ContentType.objects.get_for_model(model)\n try:\n existing_vote = Vote.objects.get(content_type=content_type,\n object_id=object_id,\n user=request.user)\n except Vote.DoesNotExist:\n existing_vote = None\n\n if existing_vote is None:\n Vote.objects.create(content_type=content_type,\n object_id=object_id,\n user=request.user,\n vote=vote_type)\n else:\n if vote_type == existing_vote.vote:\n existing_vote.delete()\n else:\n existing_vote.vote = vote_type\n existing_vote.save()\n\n # TODO Reputation management\n\n if request.is_ajax():\n return JsonResponse({\n 'success': True,\n 'score': model._default_manager.filter(\n id=object_id).values_list('score', flat=True)[0],\n })\n else:\n return HttpResponseRedirect(obj.get_absolute_url())",
"def get_for_user(self, obj, user):\n if not user.is_authenticated:\n return None\n content_object = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(voter=user, content_type=content_object, object_id=obj._get_pk_val())\n\n except ObjectDoesNotExist:\n #print('No vote by {user} on {object}'.format(user=user, object=obj))\n return None\n\n return vote",
"def test_vote_view_allows_to_vote(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n response = self.client.post('/posts/2/vote/', {\"vote\": \"-1\"})\n self.assertEqual(len(PostVotes.objects.all()), votes_len + 1)",
"def get_for_user(self, obj, user):\r\n if not user.is_authenticated():\r\n return None\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\r\n user=user)\r\n except models.ObjectDoesNotExist:\r\n vote = None\r\n return vote",
"def _user_vote(self, user):\n from . import Vote\n\n if not user.is_authenticated:\n return None\n\n return (\n Vote.query\n .filter(Vote.type == 'links')\n .filter(Vote.user_id == user.id)\n .filter(Vote.thing_id == self.id)\n .first()\n )",
"def get_for_user(self, obj, user):\n if not user.is_authenticated():\n return None\n ctype = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\n user=user)\n except models.ObjectDoesNotExist:\n vote = None\n return vote",
"def test_missing_vote_value(self) -> None:\n self.clear_votes()\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def voter_votes(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n votes = CastVote.get_by_voter(voter)\n return [v.toJSONDict() for v in votes]",
"def test_num_votes_none(self):\n q = QuestionFactory()\n QuestionVoteFactory(question=q)\n\n self.refresh()\n\n qs = {'q': '', 'w': 2, 'a': 1, 'num_voted': 2, 'num_votes': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)",
"def test_remove_vote_view_handles_none_existing_votes(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n response = self.client.post('/posts/2/remove_vote/')\n redirect_response = self.client.get(response.url)\n messages = list(redirect_response.context['messages'])\n self.assertEqual('something went wrong', str(messages[0]))",
"def test_vote_view_dont_allow_to_vote_multiple_times(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n try:\n with transaction.atomic():\n response1 = self.client.post('/posts/1/vote/', {\"vote\": \"1\"})\n except:\n pass\n self.assertEqual(len(PostVotes.objects.all()), votes_len)",
"def post_vote(request):\n if request.method == 'POST':\n user = request.user\n post_id = request.data['post_id']\n try:\n post = Post.objects.get(pk=post_id)\n except Post.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n is_positive = request.data['is_positive'].title() == \"True\"\n try:\n oldVote = Vote.objects.get(user = user, post = post)\n if oldVote.is_positive == is_positive:\n oldVote.delete()\n #return Response(oldVote.delete())\n else:\n oldVote.is_positive = is_positive\n oldVote.save()\n newVote = oldVote\n\n except Vote.DoesNotExist:\n newVote = Vote.objects.create(user=user, post=post, is_positive=is_positive)\n\n serializer = PostNestedSerializer(post)\n serializer.Meta.depth = 1;\n return Response(serializer.data)\n #serializer = VoteSerializer(newVote)\n #return Response(serializer.data)",
"def vote_exists(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"SELECT user_id, vote_id FROM votes WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.answer_id, self.user_id))\n queryset_list = cur.fetchall()\n con.close()\n if len(queryset_list) < 1:\n return False\n return True\n except Exception as e:\n print(e)\n con.close()\n return False",
"def get_voters():",
"def get_voters():",
"def record_vote_simple(self, obj, user, vote):#renamed from original record_vote\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n ctype = ContentType.objects.get_for_model(obj)\n try:\n v = self.get(user=user, content_type=ctype,\n object_id=obj._get_pk_val())\n if vote == 0:\n v.delete()\n else:\n v.vote = vote\n v.save()\n except models.ObjectDoesNotExist:\n if vote != 0:\n self.create(user=user, content_type=ctype,\n object_id=obj._get_pk_val(), vote=vote)",
"def get_queryset(self):\n return Question.objects.all().order_by(\"-allVote\") #แสดงคำถาม",
"def test_remove_vote_view_allows_to_remove_vote(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n response = self.client.post('/posts/1/remove_vote/')\n self.assertEqual(len(PostVotes.objects.all()), votes_len - 1)",
"def has_voted(self, user):\n return user.choice_set.filter(vote=self).exists()",
"def get_score(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n result = self.filter(content_type=content_type,\n object_id=obj._get_pk_val()).aggregate(\n score=Sum('vote'),\n num_votes=Count('vote'))\n #It may happen that there has been no voting on this object so far.\n if result['score'] is None:\n result['score'] = 0\n\n result['upvotes'] = self.get_upvotes(obj)\n result['downvotes'] = self.get_downvotes(obj)\n\n return result",
"def record_vote(self, obj, vote, user):\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n content_type = ContentType.objects.get_for_model(obj)\n # First, try to fetch the instance of this row from DB\n # If that does not exist, then it is the first time we're creating it\n # If it does, then just update the previous one\n try:\n vote_obj = self.get(voter=user, content_type=content_type, object_id=obj._get_pk_val())\n if vote == 0 and not ZERO_VOTES_ALLOWED:\n vote_obj.delete()\n else:\n vote_obj.vote = vote\n vote_obj.save()\n\n except ObjectDoesNotExist:\n #This is the first time we're creating it\n try:\n if not ZERO_VOTES_ALLOWED and vote == 0:\n # This shouldn't be happening actually\n return\n vote_obj = self.create(voter=user, content_type=content_type, object_id=obj._get_pk_val(), vote=vote)\n except:\n print(( '{file}: something went wrong in creating a vote object at {line}'.format(file=str('__FILE__'), line=str('__LINE__'))))\n raise ObjectDoesNotExist\n\n return vote_obj",
"def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()",
"def test_vote_count(self) -> None:\n self.downvote()\n self.downvote()\n vote_count = QuestionVote.objects.all().count()\n self.assertEqual(vote_count, 1)",
"def test_multiple_vote(self) -> None:\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n vote=1,\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def get_vote(self, id: int) -> dict:",
"def record_vote(self, obj, user, vote):\r\n if vote not in (+1, 0, -1):\r\n raise ValueError('Invalid vote (must be +1/0/-1)')\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n v = self.get(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val())\r\n if vote == 0:\r\n v.delete()\r\n else:\r\n v.vote = vote\r\n v.save()\r\n except models.ObjectDoesNotExist:\r\n if vote != 0:\r\n self.create(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val(), vote=vote)",
"def get_ratings(self):\n return Vote.objects.filter(content_type=self.get_content_type(), object_id=self.instance.pk, key=self.field.key)",
"def upvote(id,vote_type):\n # Query for user\n votes = Votes.query.filter_by(user_id=current_user.id).all()\n to_str=f'{vote_type}:{current_user.id}:{id}'\n\n if not votes:\n new_vote = Votes(vote=vote_type, user_id=current_user.id, posts_id=id)\n new_vote.save_vote()\n flash('YOU HAVE VOTED', 'success')\n\n for vote in votes:\n if f'{vote}' == to_str:\n\n break\n else: \n new_vote = Votes(vote=vote_type, user_id=current_user.id, posts_id=id)\n new_vote.save_vote()\n \n break\n\n return redirect(url_for('.view_post', id=id))",
"def sum_rating(self):\n return self.get_queryset().aggregate(Sum('vote')).get('vote__sum') or 0"
] | [
"0.6306854",
"0.59208554",
"0.58282936",
"0.55802065",
"0.5557189",
"0.55157584",
"0.5507421",
"0.542246",
"0.53265035",
"0.5298787",
"0.52821714",
"0.52754",
"0.5245972",
"0.52318245",
"0.522503",
"0.522503",
"0.5198582",
"0.51905507",
"0.5157574",
"0.5144505",
"0.5129177",
"0.5094992",
"0.5071105",
"0.50531805",
"0.5052006",
"0.50509804",
"0.5048772",
"0.50468653",
"0.50218517",
"0.5019363"
] | 0.61271936 | 1 |
If votes is empty, a ValueError is raised | def test_empty_votes(self):
with self.assertRaises(ValueError):
votes_to_percentages(DisposableVote.objects.none()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_missing_vote_value(self) -> None:\n self.clear_votes()\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def test_num_votes_none(self):\n q = QuestionFactory()\n QuestionVoteFactory(question=q)\n\n self.refresh()\n\n qs = {'q': '', 'w': 2, 'a': 1, 'num_voted': 2, 'num_votes': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)",
"def test_tally_no_votes(self):\n self.populate_database()\n self.electionA.elect_open = False\n with self.assertRaises(NoVotes):\n self.wta.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.proportional.check_race(self.raceA.id)\n\n with self.assertRaises(NoVotes):\n self.schulze.check_race(self.raceA.id)",
"def test_remove_vote_view_handles_none_existing_votes(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n response = self.client.post('/posts/2/remove_vote/')\n redirect_response = self.client.get(response.url)\n messages = list(redirect_response.context['messages'])\n self.assertEqual('something went wrong', str(messages[0]))",
"def test_wrong_input_type(self):\n with self.assertRaises(TypeError):\n votes_to_percentages(['not', 'a', 'queryset'])\n with self.assertRaises(TypeError):\n votes_to_percentages(Disposable.objects.all())",
"def test_vote_when_none_choice_was_selected(self):\n question_no_choices = create_question_without_choices(question_text=\"Question wihout Choices.\", days=-1)\n url = reverse('polls:vote', args=(question_no_choices.id,))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, question_no_choices)\n self.assertTrue('error_message' in response.context)\n self.assertEqual(response.context['error_message'], \"You didn't select a choice.\")",
"def test_unsuccessful_rating_with_empty_rate_value(self):\n response = self.client.post(\n reverse('articles:rate', kwargs={'slug': self.slug}),\n {'rating': None},\n format=\"json\",\n **self.headers)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(str(response.data['errors']['rating'][0]),\n self.violate_empty_value_error_message)",
"async def applyVote(self, votes):\n voteCount = {vote: 0 for vote in self.getMembersName()}\n voteCount[None] = 0\n for vote in votes.values():\n voteCount[vote] += 1\n\n if voteCount[None] != 0:\n await self.textChannel.send(\n \"Attention, des joueurs n'ont pas voté / ont mal écrit, les votes peuvent être faussés.\")\n del voteCount[None]\n\n playerOrder = sorted(voteCount.items(), key=lambda x: x[1], reverse=True)\n print(\"playerOrder\", playerOrder)\n if playerOrder[0][1] == 0: # Nobody vote\n await self.textChannel.send(\"`Partie non valide`, personne n'a voté.\")\n\n elif playerOrder[0][1] == 1: # People think nobody is a werewolf\n await self.textChannel.send(\"Le village pense qu'il n'y a pas de loups-garou ? Vérification ...\")\n werewolves = self.getWolves()\n if len(werewolves) == 0:\n await self.textChannel.send(\"Le village a raison, il n'y a pas de loups-garous parmis eux.\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"Malheuresement, il y avait```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # Classic vote\n werewolves = self.getWolves()\n deaths = []\n for i in range(len(playerOrder)):\n player = self.getMemberFromName(name=playerOrder[i][0])\n isDead = await player.isDead(channel=self.textChannel)\n if isDead:\n deaths += await player.death(channel=self.textChannel, members=self.players)\n print(\"voteCount :\", voteCount)\n\n # Get player name with same number of vote against them\n playerEqualVote = []\n for p in playerOrder:\n if p[1] == playerOrder[i][1] and p[0] != playerOrder[i][0]:\n playerEqualVote.append(self.getMemberFromName(name=p[0]))\n print(\"Other players with equals number of vote :\", playerEqualVote)\n for otherPlayer in playerEqualVote:\n isDead = await otherPlayer.isDead(channel=self.textChannel)\n if isDead:\n deaths += await otherPlayer.death(channel=self.textChannel, members=self.players)\n break\n\n for i in range(len(deaths)):\n if deaths[i] is None:\n del deaths[i]\n\n if len(deaths) == 0: # No one die\n if len(werewolves) == 0: # No Werewolves\n await self.textChannel.send(\"Il n'ya pas eu de mort et il n'y a aucun Loup-Garou !\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else: # Werewolves among players\n await self.textChannel.send(\n \"Il n'y a pas eu de mort mais```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n elif len(deaths) == 1:\n if deaths[0].lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]: # Werewolf die\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n elif deaths[0].lastRole in [\"Tanneur\"]: # Tanner died\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n if len(werewolves) > 0: # Wolves in game\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT ÉGALEMENT GAGNÉ```\")\n else: # Villager died\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # more than 2 deaths\n rolesDead = []\n for dead in deaths:\n if dead.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]:\n rolesDead.append(\"Loup-Garou\")\n elif dead.lastRole in [\"Tanneur\"]:\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n else:\n rolesDead.append(\"Villageois\")\n print(\"rolesDead :\", rolesDead)\n rolesDead = list(dict.fromkeys(rolesDead))\n 
print(\"rolesDead unique :\", rolesDead)\n if \"Loup-Garou\" in rolesDead:\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")",
"def result_poll(votes):\n return sum(votes) >= 2 / 3 * len(votes)",
"def test_multiple_vote(self) -> None:\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n vote=1,\n question=self.question,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def check_vote_node(data):\n\n if 'userId' not in data: # not login\n # raise ValueError(\"No userId in given vote.\")\n data['userId'] = \"\"\n\n if 'type' not in data:\n raise ValueError(\"No type of vote given.\")\n\n if data['type'] <> \"1\" and data['type'] <> \"-1\":\n raise ValueError(\"Invalid type of vote. Can only be 1 or -1.\")\n\n if 'nodeId' not in data:\n raise ValueError(\"No nodeId given in vote. Who are you voting on?\")\n\n id_node = Nodes().retrieveById(data['nodeId'])\n if id_node.status_code == 404:\n raise ValueError(\"Cannot find the node voting on.\")",
"def voteCheck(number):\n\n if number >= MIN_VOTES and number <= MAX_VOTES:\n return True\n else:\n return False\n number = input(\"\\n\\tEnter votes: \")",
"def negative_votes(self):\n return self._get(\"negative_votes\")",
"def test_vote_generator(self):\n self.assertEqual(len(self.vote_ballot), 6)",
"def test_questions_num_votes(self):\n q = QuestionFactory(title=u'tags tags tags')\n\n # Add two question votes\n QuestionVoteFactory(question=q)\n QuestionVoteFactory(question=q)\n\n self.refresh()\n\n # Advanced search for questions with num_votes > 5. The above\n # question should be not in this set.\n response = self.client.get(reverse('search.advanced'), {\n 'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',\n 'num_voted': 2, 'num_votes': 5,\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['total'], 0)\n\n # Advanced search for questions with num_votes < 1. The above\n # question should be not in this set.\n response = self.client.get(reverse('search.advanced'), {\n 'q': '', 'tags': 'desktop', 'w': '2', 'a': '1',\n 'num_voted': 1, 'num_votes': 1,\n 'format': 'json'\n })\n\n eq_(200, response.status_code)\n\n content = json.loads(response.content)\n eq_(content['total'], 0)",
"def num_bad_votes(self):\n return self.qualities.filter(correct=False).count()",
"def create_vote(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"INSERT INTO votes(user_id, answer_id, vote) VALUES(%s, %s, %s)\"\n cur.execute(query, (self.user_id, self.answer_id, self.vote_value))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True",
"def test_wrong_vote_parameter(self):\n res = self.client().post(\n '/api/v2/auth/login',\n headers=self.get_accept_content_type_headers(),\n data=json.dumps(ADMIN_LOGIN)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n access_token = response_msg[\"data\"][0][\"token\"]\n self.create_meetup(access_token, MEETUP)\n access_token = self.get_access_token(USER_REGISTRATION, USER_LOGIN)\n self.create_question(access_token, QUESTION)\n access_token = self.get_access_token(NEW_USER_REGISTRATION, NEW_USER_LOGIN)\n res = self.client().patch(\n '/api/v2/questions/1/vote',\n headers=self.get_authentication_headers(access_token)\n )\n response_msg = json.loads(res.data.decode(\"UTF-8\"))\n self.assertEqual(res.status_code, 400)\n self.assertEqual(\n response_msg[\"message\"][\"error\"],\n \"Vote path parameter can either be upvote / downvote\"\n )",
"def test_missing_question(self) -> None:\n try:\n message = \"successfully voted\"\n QuestionVote.objects.create(\n vote=1,\n user=self.user,\n )\n except django.db.IntegrityError:\n message = 'Error occured during creation of vote'\n finally:\n self.assertEqual(\n 'Error occured during creation of vote', message)",
"def enoughForLeader(self, votes):\n entry = self.getConfig()\n if entry['config'] == 'single':\n validVotes = len(set(entry['data'].keys()) & set(votes))\n return validVotes > len(entry['data']) / 2\n validVotesOld = len(set(entry['data'][0].keys()) & set(votes))\n validVotesNew = len(set(entry['data'][1].keys()) & set(votes))\n return validVotesOld > len(entry['data'][0]) / 2 and \\\n validVotesNew > len(entry['data'][1]) / 2",
"def test_vote_unparsable(self):\r\n mock_module = CHModuleFactory.create()\r\n # None means that the answer couldn't be parsed.\r\n mock_module.answer_signature = lambda text: None\r\n json_in = {'answer': 'fish', 'hint': 3, 'pk_list': '[]'}\r\n dict_out = mock_module.tally_vote(json_in)\r\n print dict_out\r\n self.assertTrue(dict_out == {'error': 'Failure in voting!'})",
"def test_vote_view_allows_to_vote(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n votes_len = len(PostVotes.objects.all())\n response = self.client.post('/posts/2/vote/', {\"vote\": \"-1\"})\n self.assertEqual(len(PostVotes.objects.all()), votes_len + 1)",
"def process_VOTED(self, msg):\n\n result = parseYesOrNo(' '.join(msg[1:]))\n if result is not None:\n assert self._vote is not None\n self._vote.set(result)",
"def test_raise_error_if_not_all_obj_are_candidate_objects(self):\n\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = \"Aase\"\n\n def tester(_):\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2])\n\n msg = \"Candidate 2 is a string, not a Candidate, and should raise a TypeError\"\n self.assertRaises(TypeError, tester, msg)\n\n # TEST THE OPPOSITE\n candidate1 = pyrankvote.Candidate(\"Per\")\n candidate2 = pyrankvote.Candidate(\"Aase\")\n\n # This should NOT raise an error\n pyrankvote.Ballot(ranked_candidates=[candidate1, candidate2])",
"def vote_exists(self):\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n try:\n query = \"SELECT user_id, vote_id FROM votes WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.answer_id, self.user_id))\n queryset_list = cur.fetchall()\n con.close()\n if len(queryset_list) < 1:\n return False\n return True\n except Exception as e:\n print(e)\n con.close()\n return False",
"def upsert_uncaptured_votes(cls):\n\n query = \"\"\"\n SELECT DISTINCT most_recent_house_vote_id as vote_id\n FROM bills\n WHERE most_recent_house_vote_id IS NOT NULL\n\n UNION\n\n SELECT DISTINCT most_recent_senate_vote_id as vote_id\n FROM bills\n WHERE most_recent_senate_vote_id IS NOT NULL\n\n EXCEPT\n\n SELECT DISTINCT vote_id\n -- This is legislative votes, named poorly\n FROM votes\n \"\"\"\n\n bills = DB().fetch_records(query)\n for result_tuple in bills:\n vote_id = result_tuple[0]\n print(vote_id)\n lv = LegislativeVotes(vote_id)\n lv.upsert_bill_votes()\n\n ilv = IndividualLegislatorVote(vote_id)\n ilv.upsert_all_votes()",
"def test_vote_nohint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}\r\n dict_out = mock_module.tally_vote(json_in)\r\n self.assertTrue(dict_out == {'error': 'Failure in voting!'})",
"def test_missing_question_upvote(self) -> None:\n self.question_id = self.fake_id\n self.upvote_url = '/api/meetups/{}/questions/{}/upvote'.format(\n self.meetup_id, self.question_id)\n response = self.upvote()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('error'),\n 'The meetup does not have a question with that id')",
"def process_vote(self, comment_id, username, value):\n raise NotImplementedError()",
"def voter_votes(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n votes = CastVote.get_by_voter(voter)\n return [v.toJSONDict() for v in votes]"
] | [
"0.6663037",
"0.6053366",
"0.5947015",
"0.5833709",
"0.574379",
"0.5649357",
"0.5536872",
"0.5523675",
"0.54281944",
"0.54198414",
"0.5394457",
"0.5368231",
"0.5327113",
"0.5319719",
"0.5312464",
"0.5308163",
"0.5286258",
"0.5258488",
"0.5245959",
"0.52093315",
"0.5204067",
"0.5185538",
"0.5175418",
"0.51640797",
"0.51636696",
"0.51570624",
"0.5154179",
"0.5146385",
"0.51363355",
"0.51347566"
] | 0.76942736 | 0 |
Find a zero using the bisection method. func is the function, a and b bracket the zero, tol = tolerance | def biseccion(func, a, b, tol=1e-4):
p = (a + b) / 2
while np.fabs(func(p)) > tol:
p = (a + b) / 2
if func(a) * func(p) < 0:
b = p
elif func(a) * func(p) > 0:
a = p
else:
return p
return p | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bisezione(f,a,b,toll=10**-5):\n m = (a+b)/2\n f_m = f(m)\n while abs(f_m) > toll:\n if f(a)*f_m < 0:\n b = m\n elif f(b)*f_m < 0:\n a = m\n elif f_m == 0:\n print(\"Trovata solzione esatta\")\n return m\n else:\n print(\"Metodo fallito\")\n return None\n m = (a+b)/2\n f_m = f(m)\n return m",
"def ChordMethod(f, a=0.0, b=1.0, x=0.75, tol=1e-10):\n\tstart = time()\n\n\t# Initialization of function values and error \n\tf_a = f(a)\n\tf_b = f(b)\n\tf_x = f(x)\n\terror = np.abs(f_x)\n\n\t# Initialization of iter number and array of error values\n\ti = 0\n\terrs = []\n\n\tif f_a*f_b<0:\n\t\twhile error > tol:\n\t\t\terrs.append(error)\n\n\t\t\tif f_a*f_x < 0:\n\t\t\t\tx = (x*f_a - a*f_x) / (f_a - f_x)\n\t\t\telse:\n\t\t\t\tx = (x*f_b - b*f_x) / (f_b - f_x)\n\n\t\t\tf_x = f(x)\n\t\t\terror = np.abs(f_x)\n\t\t\ti = i+1\n\telse:\n\t\tprint(\"Function values are of the same sign!\")\n\tend = time()\n\treturn x, (end-start), i",
"def bCheck(c, v, p, b):\n val = (v+1).floor()\n deg = c.degree()\n coeffs = c.coefficients(sparse=False)\n lcoeff = coeffs[deg]; coeffs.remove(lcoeff)\n check1 = [(coeffs[i].valuation(p) - lcoeff.valuation(p))/(deg - i) for i in range(0,len(coeffs)) if coeffs[i] != 0]\n check2 = (val - lcoeff.valuation(p))/deg\n check1.append(check2)\n bval = min(check1)\n return (bval).ceil()",
"def BisectionMethod(f, a=0, b=1, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\t\n\t# Initialization of errors and iters\n\terrs = []\n\ti = 0\n\n\tif f_a == 0:\n\t\treturn a\n\telif f_b == 0:\n\t\treturn b\n\telif f_a*f_b > 0:\n\t\tprint(\"The function values have the same sign!\")\n\telse:\n\t\terror = b-a\n\t\twhile error > tol:\n\t\t\tc = (b+a)/2\n\t\t\tf_c = f(c)\n\t\t\t\n\t\t\terrs.append(error)\n\t\t\t\n\t\t\tif f_a*f_c > 0:\n\t\t\t\ta = c\n\t\t\t\tf_a = f_c\n\t\t\telif f_a*f_c < 0:\n\t\t\t\tb = c\n\t\t\t\tf_b = f_c\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\terror = b-a\n\t\t\ti = i+1\n\tend = time()\n\treturn c, (end-start), i",
"def test_to_celcius():\n\tassert to_celcius(32) == 0\n\tpass",
"def classify(x, c, b):\n if x<c-b:\n return 0\n elif x>c+b:\n return 1\n else:\n if b>10**-7:\n return (x-c+b)/2/b\n else:\n return 0.5",
"def test_3(self):\n print(\"Consumir con cedula incorrecta\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)\n billetera1.consumir(50, \"22/15/2017\", \"Comercio1\", 20267823, 1234)\n self.assertEqual(billetera1.saldo(), 100)",
"def test_get_b():\n\n assert get_b(100, 143, 255) != 100\n assert get_b(100, 143, 255) != 143\n assert get_b(100, 143, 255) == 255",
"def find_c(b):\n return (2*(b**2) - 2000*b + 1000000)/(2000 - 2*b)",
"def valor_absoluto(numero):\r\n if numero >= 0:\r\n return numero\r\n else:\r\n return - numero",
"def residuo_cero(numero):\n for x in range (1,10):\n if(numero % x == 0):\n return x \n return numero",
"def detectar_constantes_btc():\n\n ultimos_precios = persistence.traer_ultimos_precios_btc()\n prev = int(ultimos_precios[0])\n porcentaje = 0\n counter = 0\n for i in range(1,60):\n if prev < int(ultimos_precios[i]):\n counter = counter + 1\n elif prev > int(ultimos_precios[i]):\n counter = counter - 1\n prev = int(ultimos_precios[i])\n porcentaje = calcular_porcentaje(int(ultimos_precios[0]), int(ultimos_precios[i]))\n porcentaje = round(porcentaje, 2)\n if counter > 10 and porcentaje > 1:\n return porcentaje\n elif counter < -10 and porcentaje < -1:\n return porcentaje\n else:\n return 0",
"def costo_camion(nombre_archivo):\n\n f = open(nombre_archivo, encoding='utf8')\n rows = csv.reader(f)\n next(rows)\n frutas = []\n for row in rows:\n try:\n row[1] = int(row[1])\n except ValueError:\n print(f'Warning: {row[0]} no tiene un precio válido')\n try:\n row[2] = float(row[2])\n except:\n print(f'Warning: {row[0]} no posee una cantidad de cajones')\n\n frutas.append(row)\n \n f.close()\n \n costo_total = 0\n for fruta in frutas:\n try:\n costo_total += (fruta[1] * fruta[2])\n except TypeError:\n print(f'Warning: {fruta[0]} no tiene un precio válido y no se sumará al costo total')\n \n return costo_total",
"def costo_camion(nombre_archivo):\n\n f = open(nombre_archivo, encoding='utf8')\n rows = csv.reader(f)\n next(rows)\n frutas = []\n for row in rows:\n try:\n row[1] = int(row[1])\n except ValueError:\n print(f'Warning: {row[0]} no tiene un precio válido')\n try:\n row[2] = float(row[2])\n except:\n print(f'Warning: {row[0]} no posee una cantidad de cajones')\n\n frutas.append(row)\n \n f.close()\n \n costo_total = 0\n for fruta in frutas:\n try:\n costo_total += (fruta[1] * fruta[2])\n except TypeError:\n print(f'Warning: {fruta[0]} no tiene un precio válido y no se sumará al costo total')\n \n return costo_total",
"def metodo1(b, u, lam, gam, v, n):\n\n ro = calculo_ro(b, u, lam, gam, v, n)\n vro = calculo_validador(u, lam, gam)\n\n return ro, vro",
"def test_9(self):\n print(\"Consumir 0 bs sinb haber recargado\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.consumir(0, \"22/15/2017\", \"Comercio1\", 20267824, 1234)\n self.assertEqual(billetera1.saldo(), 0)",
"def custo(EstadoRestaUm, resultante):\n return 1",
"def test_c2f():\n assert temperatura.c2f(3) == 37.4",
"def closerecth(f, bdil=None, bc=None):\n\n if bdil is None: bdil = secross()\n if bc is None: bc = secross()\n y = subm(closerec(f,bdil,bc), f)\n return y",
"def cdf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.zero\n if x > 1:\n return mp.one\n return mp.betainc(a, b, x1=0, x2=x, regularized=True)",
"def askCash(total):\n respuesta = float(input(\"CUANTO PAGO EL REPARTIDOR? \"))\n if respuesta <= total:\n result = float(total) - respuesta\n return result\n else:\n print(\"EL PAGO TIENE QUE SER MENOR O IGUAL AL TOTAL DE LA ORDEN\")\n askCash(total)",
"def test_61(self):\n print(\"Recargar con cedula correcta\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)",
"def test_f2c():\n assert temperatura.f2c(32) == 0",
"def bc_flower(teff):\n lteff=np.log10(teff)\n if (lteff<3.7):\n bcflow=-0.190537291496456e+05+0.155144866764412e+05*lteff-0.421278819301717e+04*(lteff*lteff)+0.381476328422343e+03*(lteff*lteff*lteff)\n if (lteff>=3.7 and lteff<3.9):\n bcflow=-0.370510203809015e+05+0.385672629965804e+05*lteff-0.150651486316025e+05*(lteff*lteff)+0.261724637119416e+04*(lteff*lteff*lteff)-0.170623810323864e+03*(lteff*lteff*lteff*lteff)\n if (lteff>=3.9):\n bcflow=-0.370510203809015e+05+0.385672629965804e+05*lteff-0.150651486316025e+05*(lteff*lteff)+0.261724637119416e+04*(lteff*lteff*lteff)-0.170623810323864e+03*(lteff*lteff*lteff*lteff)\n return bcflow",
"def GetConcBeer(Abs, epsilon, pathLength):\n return Abs / (epsilon * pathLength)",
"def test_10(self):\n print(\"Consumir 0 bs y haber recargado\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2sinb017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)\n billetera1.consumir(0, \"22/15/2017\", \"Comercio1\", 20267824, 1234)\n self.assertEqual(billetera1.saldo(), 100)",
"def bisect(rlo, rhi, acc, tol, fun, params=None):\n while rhi-rlo>acc:\n r=0.5*(rhi+rlo)\n if params: isvalid=fun(r,tol,params)\n else: isvalid=fun(r,tol)\n if isvalid:\n rlo=r\n else:\n rhi=r\n return rlo",
"def gera_num_cc(abv):\n \n # Ao recebermos a indicacao de que entidade se pretende gerar um numero, usamos a funcao auxiliar escolhe_iin_comp para escolher aleatoriamente os digitos iniciais e o comprimento do cartao.\n # O numero final comeca por ser os digitos iniciais, juntando a estes, do lado direito, numeros aleatorios ate chegarmos ao comprimento pretendido menos 1. O ultimo digito sera o digito de verificacao.\n \n dig_iniciais , comp = escolhe_iin_comp(abv) \n num_cc = dig_iniciais\n \n for i in range(comp-len(dig_iniciais)-1): \n num_cc = num_cc + str(int(random()*10)) \n \n num_cc = num_cc + digito_verificacao(num_cc)\n \n return int(num_cc)",
"def test_1(self):\n print(\"Consumir mas de lo que recargue en la cuenta\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)\n billetera1.consumir(250, \"22/15/2017\", \"Comercio1\", 20267824, 1234)\n self.assertEqual(billetera1.saldo(), 100)",
"def test_41(self):\n print(\"Consumir con clave correcta y cedula correcta\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)\n billetera1.consumir(50, \"22/15/2017\", \"Comercio1\", 20267824, 1234)\n self.assertEqual(billetera1.saldo(), 50)"
] | [
"0.5555701",
"0.5502055",
"0.5292556",
"0.52499145",
"0.5237464",
"0.51849324",
"0.5158482",
"0.5082896",
"0.50477093",
"0.5032028",
"0.50297546",
"0.5017506",
"0.5013907",
"0.5013907",
"0.50065714",
"0.5002977",
"0.49987894",
"0.49891984",
"0.4987446",
"0.49794155",
"0.497534",
"0.49394616",
"0.49021155",
"0.48940533",
"0.4892168",
"0.48860425",
"0.48845595",
"0.48516613",
"0.48502803",
"0.48488322"
] | 0.58301175 | 0 |
return the child point q corresponding to p in the parent. note that q.side == p.side THINK ABOUT IT | def parent_to_child(p):
a,b = LINES[p.side]
if p.x < 0.5:
return Point(a, p.side, 2*p.x)
else:
return Point(b, p.side, 2*p.x - 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parent(self,p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, p):\n node = self._validate_position(p)\n return self._make_position(node)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node.parent)",
"def _parent(child):\n parent = Pos(n=child.n - 1, x=child.x // 2, y=child.y // 2)\n left = child.x % 2\n top = child.y % 2\n return (parent, left, top)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def _get_parent(self, child_ix):\n if child_ix == 0:\n return None\n t = 1 if child_ix & 1 else 2\n return (child_ix - t) / 2",
"def parent(self, pos): \n return pos//2",
"def parent(self, p):\n raise NotImplementedError( must be implemented by subclass)",
"def parent(self, pos):\n return pos // 2",
"def parent(self, pos):\n return pos // 2",
"def parent(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def parent(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def parent(self, p):\n raise NotImplementedError('must be implemented by subclass')",
"def _parent(self, j):\n return (j - 1) // 2",
"def _parent(self, j):\n return (j - 1) // 2",
"def parent(self, p):\n raise NotImplemented(\"must be implemented by subclass\")",
"def parent(self, pos):\n if pos == 0: \n return None\n return int(math.ceil(pos / self.dary) - 1)",
"def find(self, p):\n if self.parent[p] != p: \n self.parent[p] = self.find(self.parent[p])\n return self.parent[p]",
"def parent(self, n):\n return n._parent",
"def find(p):\n if p != parent[p]:\n parent[p] = find(parent[p])\n return parent[p]",
"def get_parent_index(self, child):\n return (child-1)//2",
"def parent(self):\n return self.key().parent()",
"def fm_get_parent(self, idx):\n return self._relation_lst[self.PARENT][idx]",
"def parent(self, index):\n if index == 0:\n print(\"index 0 has no parent\")\n return None\n return (index - 1) // 2",
"def find(self, p):\n parent = self._parent\n while p != parent[p]:\n p = parent[p] # !!cannot apply path compression to this problem\n return p",
"def parent(self, node):\n self._validate_node(node)\n idx = node._index\n if idx == 0:\n return None # Root node has no parent\n if idx % 2 == 0:\n return self._array[(idx-2)//2] # Right child (even number)\n return self._array[(idx-1)//2] # left child (odd number)",
"def inside(self, p: PointType, q: PointType) -> bool:\n\n # XXX re-implement with ccw and a list of points instead of a pair\n\n i = min(p.x, q.x) < self.x < max(p.x, q.x)\n j = min(p.y, q.y) < self.y < max(p.y, q.y)\n\n return i and j"
] | [
"0.70021415",
"0.6914503",
"0.68851006",
"0.67637295",
"0.67503387",
"0.67251015",
"0.67251015",
"0.67251015",
"0.6384844",
"0.636594",
"0.6338018",
"0.63363457",
"0.63363457",
"0.62673676",
"0.62673676",
"0.624849",
"0.6232469",
"0.6232469",
"0.619909",
"0.609522",
"0.59893197",
"0.5876296",
"0.5869377",
"0.58616656",
"0.5829268",
"0.5829248",
"0.5799197",
"0.5771841",
"0.5738626",
"0.57283545"
] | 0.7743672 | 0 |
return a random endpoint in the current child not on taken_side | def random_endpoint(child, taken_side=None):
sides = [s for s in SIDES[child] if s != taken_side]
return Point(child, random.choice(sides), 0 if random.random() < 0.5 else 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_random_pos_on_a_side(self):\n pass",
"def throw(self):\n self.side = random.randint(1, self.num_sides)",
"def get_random_node(self):\n if random.randint(0, 100) > self.goal_sample_rate:\n random_node = self.Node(\n random.uniform(self.min_rand, self.max_rand),\n random.uniform(self.min_rand, self.max_rand),\n )\n else: # goal point sampling\n random_node = self.Node(self.end.x, self.end.y)\n return random_node",
"def get_random_link(self):\n return tuple([random.randint(0, d-1) for d in self.link_idxs])",
"def choose_starting_points(self, side):\n # Left Side\n if side == 1:\n x = np.random.uniform(self.left_side[\"x_min\"], self.left_side[\"x_max\"])\n y = np.random.uniform(self.left_side[\"y_min\"], self.left_side[\"y_max\"])\n # Bottom\n elif side == 2:\n x = np.random.uniform(self.bottom[\"x_min\"], self.bottom[\"x_max\"])\n y = np.random.uniform(self.bottom[\"y_min\"], self.bottom[\"y_max\"])\n # Right Side\n elif side == 3:\n x = np.random.uniform(self.right_side[\"x_min\"], self.right_side[\"x_max\"])\n y = np.random.uniform(self.right_side[\"y_min\"], self.right_side[\"y_max\"])\n # Top\n elif side == 4:\n x = np.random.uniform(self.top[\"x_min\"], self.top[\"x_max\"])\n y = np.random.uniform(self.top[\"y_min\"], self.top[\"y_max\"])\n else:\n raise ValueError(\"Invalid number for sides!\")\n\n return x, y",
"def getRandomPipe():\n pipeHeight = GAME_PHOTOS['pipe'][0].get_height()\n offset = SCREEN_HEIGHT/3\n y2 = offset + random.randrange(0, int(SCREEN_HEIGHT - GAME_PHOTOS['base'].get_height() - 1.2 *offset))\n pipeX = SCREEN_WIDTH + 10\n y1 = pipeHeight - y2 + offset\n pipe = [\n {'x': pipeX, 'y': -y1}, #upper Pipe\n {'x': pipeX, 'y': y2} #lower Pipe\n ]\n return pipe",
"def choose_next(self, round):\n return random.choice(self.possible_coords)",
"def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))",
"def findRandomFloorNode(self):\n\t\tx = random.randint(0, self.width - 1)\n\t\ty = random.randint(0, self.height - 1)\n\t\twhile not self.isFloor(x, y):\n\t\t\tx = random.randint(0, self.width - 1)\n\t\t\ty = random.randint(0, self.height - 1)\n\t\treturn (x, y)",
"def getRandomPipe():\r\n\r\n\r\n\tpipeHeight = GAME_SPIRTES['pipe'][0].get_height()\r\n\ty2 = offset + random.randrange(0, int(SCREENHEIGHT-GAME_SPIRTES['base'].get_height()) - 1.2*offset)\r\n\ty1 = pipeHeight - y2 +offset\r\n\tpipe = [\r\n\t{'x':pipeX,'y':-y1},\r\n\t{'x':pipeX,'y':y2}\r\n\r\n\t]\r\n\treturn pipe",
"def pick_random_route(self, potential_solution):\n route = random.choice(potential_solution.lining)\n return route",
"def random_adjacent_tile(self):\n adj = self.adjacent()\n pos_list = [pos for pos in adj if self.in_grid(pos) and pos != self.prev]\n return random.choice(pos_list)",
"def generate_child(self, parent1, parent2):\n if np.random.random() < self.crossover_prob:\n # crossover\n x, y = np.sort(np.random.randint(len(parent1), size=2))\n return np.vstack((parent1[:x], parent2[x:y], parent1[y:]))\n else:\n # mutation\n return self.generate_conformations()[0]",
"def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))",
"def roll(self):\n\n rnum = random.randint(0, len(self.sides) - 1)\n return self.sides[rnum]",
"def _randomize(self):\n return self.graph",
"def random_neighbors(self) -> int:\n return self.__random_neighbors",
"def obtener_vertice_aleatorio (self):\n return random.choice(list(self.vertices.keys()))",
"def random_element(self) -> 'PFElement':\n return random.choice(list(iter(self)))",
"def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))",
"def getRandomPipe():\n # y of gap between upper and lower pipe\n gapY = random.randrange(int(BASEY * 0.5), int(BASEY * 0.8))\n\n pipeX = SCREEN_WIDTH - 10\n\n return [{'x': pipeX, 'y': gapY}] # lower pipe",
"def random_location(self):\n return random.choice(self.locations_list)",
"def get_random_site(self):\n return tuple([random.randint(0, d-1) for d in self.site_idxs])",
"def random_location(self, normal=True):\n if normal:\n return distribute_normally(x1=self.x1, x2=self.x2, y1=self.y1, y2=self.y2)\n else:\n raise NotImplementedError",
"def roll(self):\n return random.randint(1,self.sides)\n #return int(self.sides*random.random() + 1.0)",
"def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt",
"def random_pipe():\r\n pipe_height = GAME_SPRITES['pipe'][0].get_height()\r\n offset = SCREENHEIGHT/3\r\n position_for_lower_pipe_at_y = random.randrange(0, int(SCREENHEIGHT - GAME_SPRITES['base'].get_height() - 1.2 * offset))\r\n pipe_x = SCREENWIDTH * 10\r\n position_for_upper_pipe_at_y = pipe_height - position_for_lower_pipe_at_y + offset\r\n pipe = [\r\n {'x': pipe_x, 'y': position_for_upper_pipe_at_y},\r\n {'x': pipe_x, 'y': position_for_lower_pipe_at_y}\r\n ]\r\n return pipe",
"def roll(self):\n\t\treturn randint(1, self.num_sides)",
"def random_neighbor(node, topology):\n return np.random.choice(neighbors(node=node, topology=topology))",
"def targetpoint(self, initpoint):\n while True:\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n if (row, col) != initpoint:\n break\n return (row, col)"
] | [
"0.70703185",
"0.6206867",
"0.61404693",
"0.5760542",
"0.5755857",
"0.5748231",
"0.56564564",
"0.56409484",
"0.56029654",
"0.5594463",
"0.5553292",
"0.5547132",
"0.55098593",
"0.5449766",
"0.544954",
"0.5447059",
"0.5444465",
"0.54438233",
"0.5443404",
"0.54398537",
"0.54397625",
"0.53916794",
"0.5374154",
"0.5371318",
"0.5343157",
"0.53417945",
"0.5334484",
"0.5334311",
"0.5326861",
"0.5315491"
] | 0.832961 | 0 |
Loads a .py module from github (raw). Returns a module object | def get_module_from_github(url):
with urlopen(url) as response:
if response.code == 200:
text = str(response.read(), encoding="utf-8")
_, path = mkstemp(suffix=".py", text=True)
with open(path, mode='wt', encoding='utf-8') as fh:
fh.write(text)
directory, file_name = os.path.split(path)
working_dir = os.getcwd()
os.chdir(directory)
module = __import__(file_name[:-3])
os.chdir(working_dir)
os.remove(path)
return module | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_module(self, fullname):\n LOGGER.info('Loading module {0}'.format(fullname))\n if fullname in sys.modules:\n return sys.modules[fullname]\n\n splitted_names = fullname.split('.')\n if 'github' in splitted_names:\n if len(splitted_names) >= 3:\n self.username = splitted_names[splitted_names.index('github') + 1]\n if len(splitted_names) >= 4:\n self.repository_name = splitted_names[splitted_names.index('github') + 2]\n\n if self.username and self.repository_name:\n self.clone_github_repo()\n\n if len(splitted_names) == 2:\n return super().load_module(fullname)\n if len(splitted_names) == 3:\n username_directory = os.path.join(MODULES_PATH, 'github', self.username)\n if not os.path.exists(username_directory):\n os.mkdir(username_directory)\n init_filename = os.path.join(username_directory, '__init__.py')\n open(init_filename, 'a').close()\n return super().load_module(fullname)\n if len(splitted_names) >= 4:\n module = super().load_module(fullname)\n parent, _, current_module = fullname.rpartition('.')\n root_modules = [\n 'packyou.github.{0}.{1}'.format(self.username, self.repository_name),\n 'packyou.github.{0}.{1}.{1}'.format(self.username, self.repository_name)\n ]\n LOGGER.info('Current module is {0}'.format(current_module))\n if fullname in root_modules:\n self.root_module = fullname\n sys.modules[current_module] = module\n return module\n\n else:\n ipdb.set_trace()\n module = super().load_module(fullname)\n sys.modules[fullname] = module\n if not module:\n raise ImportError\n return module",
"def main(raw_args):\n parser = argparse.ArgumentParser()\n parser.add_argument('--module-link')\n parser.add_argument('module_path', type=os.path.realpath)\n args = parser.parse_args(raw_args)\n\n return md_module(\n load_module_from_path(args.module_path), module_link=args.module_link)",
"def load_module(name, path):\n loader = importlib.machinery.SourceFileLoader(name, path)\n module = types.ModuleType(loader.name)\n loader.exec_module(module)\n return module",
"def parse_python_module(module_path, name):\n\n module = imp.load_source(name, module_path)\n\n if module is None:\n log.e(TAG, \"Error launching module '%s'.\" % name)\n return None\n\n try:\n mod_class = getattr(module, name)\n mod_inst = mod_class()\n\n except AttributeError:\n log.e(TAG, \"Unable to find class '%s' in module!\" % name)\n return None\n\n item = dtf.core.item.Item()\n item.type = dtf.core.item.TYPE_MODULE\n item.name = name\n item.local_name = module_path\n item.install_name = name\n item.author = mod_inst.author\n item.about = mod_inst.about\n\n version = mod_inst.version\n if version is not None:\n if dtf.core.item.is_valid_version(version):\n item.version = version\n else:\n log.e(TAG, \"Invalid version specified. Exiting.\")\n return None\n else:\n item.version = None\n\n # Remove the compiled file name\n compiled_python_file = \"%sc\" % module_path\n if os.path.isfile(compiled_python_file):\n os.remove(compiled_python_file)\n\n return item",
"def load_module(module_name: str, module_path: str) -> object:\n spec = module_util.spec_from_file_location(module_name, module_path)\n module = module_util.module_from_spec(spec)\n spec.loader.exec_module(module) # type: ignore\n return module",
"def download_module(self, abs_name):\n self._log.info(\"Downloading module {}\".format(abs_name))\n\n path = abs_name.replace(\".\", \"/\")\n filepath = path + \".py\"\n\n info = self._git_show(path)\n\n if info is None:\n info_test = self._git_show(path + \".py\")\n if info_test is None:\n raise ImportError(abs_name)\n info = info_test\n\n self._download(path, info=info)\n\n return None",
"def load_mod_from_file(self, fpath):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tfpath = os.path.abspath(fpath)\n\t\tfile_ext = os.path.splitext(os.path.split(fpath)[-1])[-1]\n\t\tif file_ext.lower() != '.py':\n\t\t\treturn\n\t\twith open(fpath) as f:\n\t\t\tcontent = f.read().splitlines()\n\t\tok = False\n\t\tfor line in content:\n\t\t\tif line.strip() == 'from shutit_module import ShutItModule':\n\t\t\t\tok = True\n\t\t\t\tbreak\n\t\tif not ok:\n\t\t\tself.log('Rejected file: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Note that this attribute will only be set for 'new style' module loading, # this should be ok because 'old style' loading checks for duplicate # existing modules.\n\t\t# TODO: this is quadratic complexity\n\t\texistingmodules = [\n\t\t\tm for m in self.shutit_modules\n\t\t\tif getattr(m, '__module_file', None) == fpath\n\t\t]\n\t\tif existingmodules:\n\t\t\tself.log('Module already seen: ' + fpath,level=logging.DEBUG)\n\t\t\treturn\n\t\t# Looks like it's ok to load this file\n\t\tself.log('Loading source for: ' + fpath,level=logging.DEBUG)\n\n\t\t# Add this directory to the python path iff not already there.\n\t\tdirectory = os.path.dirname(fpath)\n\t\tif directory not in sys.path:\n\t\t\tsys.path.append(os.path.dirname(fpath))\n\t\t# TODO: use bytearray to encode?\n\t\tmod_name = base64.b32encode(fpath.encode()).decode().replace('=', '')\n\t\tpymod = imp.load_source(mod_name, fpath)\n\n\t\t# Got the python module, now time to pull the shutit module(s) out of it.\n\t\ttargets = [\n\t\t\t('module', self.shutit_modules), ('conn_module', self.conn_modules)\n\t\t]\n\t\tself.build['source'] = {}\n\t\tfor attr, target in targets:\n\t\t\tmodulefunc = getattr(pymod, attr, None)\n\t\t\t# Old style or not a shutit module, nothing else to do\n\t\t\tif not callable(modulefunc):\n\t\t\t\treturn\n\t\t\tmodules = modulefunc()\n\t\t\tif not isinstance(modules, list):\n\t\t\t\tmodules = [modules]\n\t\t\tfor module in modules:\n\t\t\t\tsetattr(module, '__module_file', fpath)\n\t\t\t\tShutItModule.register(module.__class__)\n\t\t\t\ttarget.add(module)\n\t\t\t\tself.build['source'][fpath] = open(fpath).read()",
"def import_pymodule(scheme):\n if not SchModule._ready:\n raise ValueError(u\"not mounted\")\n\n p = SchModule.DIR.hpath(scheme)\n p = path.join(p, SchModule.PYMODULE)\n p = p.encode(sys.getfilesystemencoding())\n # In load_source(name, path): name is name of module (without extension),\n # path is full path to the file of module\n return imp.load_source(path.splitext(SchModule.PYMODULE)[0], p)",
"def load_module(module_name, root_dir):\n module_filepath = os.path.join(root_dir, module_name)\n python_version = sys.version_info[:2]\n\n module = None\n if python_version <= (2, 7):\n import imp\n module = imp.load_source(module_name, module_filepath)\n else:\n import importlib\n loader = importlib.machinery.SourceFileLoader(module_name, module_filepath)\n if python_version <= (3, 4):\n module = loader.load_module()\n else:\n spec = importlib.util.spec_from_loader(loader.name, loader)\n module = importlib.util.module_from_spec(spec)\n loader.exec_module(module)\n\n return module",
"def LoadModule(filename):\n (name, ext) = os.path.splitext(filename)\n\n fh = open(filename, \"r\")\n try:\n return imp.load_module(name, fh, filename, (ext, \"r\", imp.PY_SOURCE))\n finally:\n fh.close()",
"def load_module(path):\n spec = spec_from_file_location(\"module.name\", path)\n module = module_from_spec(spec)\n try:\n spec.loader.exec_module(module)\n except Exception as err:\n # ToDo: Append functions found from spec.loader.get_code(\"module.name\")\n # To some hidden attribute of the module object to be returned.\n warn(f'Exception when loading module {path}: \\n{err}')\n return module",
"def test_load_simple_module():\n loader = Loader()\n main_fname = loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()",
"def as_module(file_path, name):\n\n with lock:\n with open(file_path, 'U') as module_file:\n prev = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n module = imp.load_module(name, module_file, file_path, (\".py\", 'U', imp.PY_SOURCE))\n sys.dont_write_bytecode = prev\n sys.modules[name] = module\n return module",
"def parse_module(filename: str) -> Module:\n state = get_state()\n contents = \"\"\n if str(filename)[-2:] == \"/-\":\n contents, _, __ = decode_bytes(sys.stdin.buffer.read())\n elif not os.path.isfile(filename):\n raise Exception(\"file does not exist\")\n else:\n with open(filename, encoding=\"utf-8\", errors=\"replace\") as file_to_read:\n contents += file_to_read.read()\n try:\n ast = ast3.parse(contents)\n except: # noqa\n out(\"error in parsing\", color=\"red\")\n if state.ignore_exceptions:\n sys.exit(0)\n module = Module(ast, filename) # type: ignore\n return module",
"def import_module_from_module_path(path):\n return SourceFileLoader('', path).load_module()",
"def load_module(file_name):\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m",
"def loadModule(mod):\n try:\n # from pyrominfo import gameboy, etc\n pyrominfo = __import__(\"pyrominfo\", globals(), locals(), [mod])\n except ImportError:\n import os\n parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.sys.path.insert(0, parentdir)\n pyrominfo = __import__(\"pyrominfo\", globals(), locals(), [mod])\n try:\n return getattr(pyrominfo, mod)\n except AttributeError:\n raise ImportError(\"testutils.loadModule() can't find module %s in pyrominfo package\" % mod)",
"def fetch_and_import(self,\n script_path,\n domain=\"raw.githubusercontent.com\",\n urlpath=_GITHUB_URLPATH):\n def import_file_directly(path):\n \"\"\"Import a file at :path: directly, bypassing __import__.\"\"\"\n name = \"local_module_\" + re.sub(r\"[\\./]\", \"_\", path)\n return imp.load_source(name, path)\n\n # First try to find the script locally in the\n # current working directory.\n info = self.fetch_script(script_path, domain, urlpath)\n key = \"{0}/{1}/{2}\".format(domain, urlpath, script_path)\n\n try:\n return self._module_cache[key]\n except KeyError:\n # We try to import the file normally first - this is useful\n # for tests where we want to be able to get coverage on those\n # files. If we can't import directly, then we need to\n # fall back to importing the file.\n fs_path = info.fs_path\n if info.in_scripts_dir:\n try:\n name = os.path.relpath(os.path.splitext(fs_path)[0],\n start=self._scripts_dir)\n name = name.replace(os.path.sep, \".\")\n self._module_cache[key] = importlib.import_module(name)\n except ImportError:\n self._module_cache[key] = import_file_directly(fs_path)\n else:\n self._module_cache[key] = import_file_directly(fs_path)\n\n return self._module_cache[key]",
"def import_source(module_name):\n module_file_path = module_name.__file__\n module_name = module_name.__name__\n\n module_spec = importlib.util.spec_from_file_location(module_name, module_file_path)\n module = importlib.util.module_from_spec(module_spec)\n module_spec.loader.exec_module(module)\n print(dir(module))\n\n msg = \"The {module_name} module has the following methods:{methods}\"\n print(msg.format(module_name=module_name, methods=dir(module)))",
"def load(module_name):\r\n if module_name.startswith('http://'):\r\n pico_url, module_name = module_name.split('/pico/')\r\n global url\r\n url = pico_url + '/pico/'\r\n module_dict = get(url + module_name)\r\n module = imp.new_module(module_name)\r\n module.__doc__ = module_dict['__doc__']\r\n functions = module_dict['functions']\r\n for function_def in functions:\r\n name = function_def['name']\r\n args = function_def['args']\r\n args_string = ', '.join([\"%s=%s\"%(arg, json.dumps(default).replace(\"null\", \"None\")) for arg, default in args if arg != None])\r\n stream = function_def['stream']\r\n docstring = function_def['doc']\r\n exec(\"\"\"\r\ndef f(%s):\r\n \\\"\\\"\\\" %s \\\"\\\"\\\"\r\n return _call_function('%s', '%s', locals(), %s)\r\n\"\"\"%(args_string, docstring, module_name, name, stream))\r\n setattr(module, name, f)\r\n return module",
"def load(cls, src: str):\n # Load check-point\n state_dict = torch.load(src)\n\n # Create a new module\n specifics = state_dict.pop(\"additional_state\")\n configuration = specifics[\"configuration\"]\n module = Net(**configuration)\n\n # Restore state\n module.load_state_dict(state_dict)\n\n # End\n return module",
"def get_module(filename_with_path):\n try:\n with open(filename_with_path) as config_file:\n Module.temp_path = filename_with_path\n this_module = yaml.load(config_file, Loader=Loader)\n Module.temp_path = \"\"\n return this_module\n except IOError:\n raise ModulePathError(filename_with_path)\n except yaml.scanner.ScannerError:\n raise ModuleConstraintParseError(\"Parsing of module {} failed. This is likely caused by a typo in the file.\"\n \"\".format(filename_with_path))",
"def load_module(file_name):\n mod_name = file_module_name(file_name)\n spec = imputil.spec_from_file_location(mod_name, file_name)\n if spec is None:\n raise ImportError(f'cannot import from {file_name!r}')\n mod = imputil.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod",
"def import_(filename):\n (path, name) = os.path.split(filename)\n (name, ext) = os.path.splitext(name)\n try:\n return sys.modules[name]\n except KeyError:\n pass\n try:\n file, filename, data = imp.find_module(name, [path])\n except ImportError:\n print('No module {} found'.format(name))\n try:\n mod = imp.load_module(name, file, filename, data)\n return mod\n except UnboundLocalError:\n pass\n finally:\n # Since we may exit via an exception, close fp explicitly.\n try:\n if file:\n file.close()\n except UnboundLocalError:\n if not os.path.exists(path):\n os.makedirs(path)\n from shutil import copyfile\n if os.name == 'nt':\n copyfile(os.path.join(path_to_module, 'models\\myfitmodels.py'), filename)\n else:\n copyfile(os.path.join(path_to_module, './models/myfitmodels.py'), filename)\n # open(filename, 'a').close()",
"def load_module(self, file_path: Path) -> Module:\n if file_path.suffix != \".wasm\":\n raise Exception(\"Unsupported file type: {file_path.suffix}\")\n\n with file_path.open(\"rb\") as wasm_file:\n try:\n module = parse_module(wasm_file)\n except ParseError as err:\n raise MalformedModule from err\n\n try:\n validate_module(module)\n except ValidationError as err:\n raise InvalidModule from err\n\n return module",
"def load_module (self, name):\n module = sys.modules.get (name)\n if module is not None:\n return module\n\n containment = self.containments.get (name)\n if containment is None:\n raise ImportError ('No such module: \\'{}\\''.format (name))\n source, filename, ispkg = containment\n\n module = imp.new_module (name)\n module.__loader__ = self\n module.__file__ = filename\n if ispkg:\n module.__path__ = [os.path.dirname (filename)]\n module.__package__ = name\n else:\n module.__package__ = name.rpartition ('.') [0]\n\n module.__initializing__ = True\n sys.modules [name] = module\n try:\n Exec (compile (source, module.__file__, 'exec'), module.__dict__)\n return module\n except Exception:\n sys.modules.pop (name, None)\n raise\n finally:\n module.__initializing__ = False",
"def quick_import(module_name, path=None, build=False, suffix=\"so\", with_html=False, re_build_func=re_build_func,\n re_build_func_kwargs=None\n ):\n\n def_pwd(path)\n print(\"Move to {}\".format(os.getcwd()))\n\n if build:\n if re_build_func_kwargs is None:\n re_build_func(module_name, with_html)\n else:\n re_build_func(**re_build_func_kwargs)\n\n ext = [i for i in os.listdir() if module_name in i and suffix in i]\n if len(ext) > 0:\n module = import_module(module_name, os.getcwd())\n msg = \"The {module_name} module methods:{methods}\"\n names = dir(module)\n names = [i for i in names if \"__\" not in i]\n print(msg.format(module_name=module_name, methods=names))\n return module\n else:\n raise FileNotFoundError(\": There is no './{}.***.{}' in '{}',\\n\".format(module_name, suffix, path),\n \"There are just {},\\n\".format(os.listdir()),\n \"Please try to build=Ture again.\")",
"def import_file(name: Text, file_path: Text):\n\n spec = spec_from_file_location(f\"luh3417.{name}\", file_path)\n module = module_from_spec(spec)\n spec.loader.exec_module(module)\n\n return module",
"def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n logger_lib = None\n print(\"Logger library not found in shared repo.\", flush = True)\n #raise Exception(\"Couldn't find google drive folder!\")\n else: \n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib",
"def load_module(module_name, file_name):\n from importlib.machinery import SourceFileLoader\n home_dir = os.path.expanduser(\"~\")\n valid_paths = [\n os.path.join(home_dir, \"Google Drive\"),\n os.path.join(home_dir, \"GoogleDrive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"Google Drive\"),\n os.path.join(os.path.join(home_dir, \"Desktop\"), \"GoogleDrive\"),\n os.path.join(\"C:/\", \"GoogleDrive\"),\n os.path.join(\"C:/\", \"Google Drive\"),\n os.path.join(\"D:/\", \"GoogleDrive\"),\n os.path.join(\"D:/\", \"Google Drive\"),\n ]\n\n drive_path = None\n for path in valid_paths:\n if os.path.isdir(path):\n drive_path = path\n break\n\n if drive_path is None:\n logger_lib = None\n print(\"Logger library not found in shared repo.\", flush = True)\n #raise Exception(\"Couldn't find google drive folder!\")\n else: \n utils_path = os.path.join(drive_path, \"_pyutils\")\n print(\"Loading [{}] package...\".format(os.path.join(utils_path,file_name)),flush = True)\n logger_lib = SourceFileLoader(module_name, os.path.join(utils_path, file_name)).load_module()\n print(\"Done loading [{}] package.\".format(os.path.join(utils_path,file_name)),flush = True)\n\n return logger_lib"
] | [
"0.6863036",
"0.65888363",
"0.64832926",
"0.6392326",
"0.637052",
"0.6313743",
"0.62788546",
"0.62697667",
"0.62596184",
"0.6259035",
"0.6255723",
"0.6192319",
"0.6162507",
"0.61570686",
"0.6147943",
"0.6132673",
"0.6108426",
"0.60681444",
"0.6066579",
"0.60329056",
"0.59926844",
"0.59819114",
"0.5966361",
"0.59549844",
"0.59045607",
"0.5897155",
"0.5895391",
"0.5894443",
"0.5872373",
"0.5872373"
] | 0.78711605 | 0 |
returns the quote's text with tagged part of quote chunks | def serialize_quote(self):
partofs = PartOfQuote.objects.filter(part_of=self)
quote = self.text
for x in partofs:
quote = quote.replace(x.text, create_tag(x))
return quote | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def block_quote(self, text):\n return [\"<blockquote>\"] + text",
"def process_quote_text(quote_text):\n quote_text = quote_text.replace('―', '').replace('\\n\\n', '\\n')\n quote_text = quote_text[:-1] if quote_text[-1] == '\\n' else quote_text\n for char in HTML:\n quote_text = quote_text.replace(*char)\n return quote_text",
"def text_of_quotation(exp):\n return cadr(exp)",
"def get_text_hook(raw):\n soup = bs4.BeautifulSoup(quopri.decodestring(raw), features=\"lxml\")\n return soup.text",
"def add_smart_quotes(article: Article) -> Article:\n text_tag: bs4.NavigableString\n for text_tag in article.content.find_all(text=True):\n #\\1 will sub in the first matched group\n new_tag = re.sub(r'\"([^\"]*)\"', r'“\\1”', text_tag)\n text_tag.replace_with(new_tag)\n return article",
"def getQuote(body):\n lines = []\n body = quote_remove.sub('', body) # Remove quoted lines\n\n if len(body) > max_sentence_buffer*2:\n for match in p.finditer(body):\n short_line = \"\"\n\n # Handle case of match being larger than sentence buffer\n safe_start = match.start()\n if (match.end() - match.start() > max_number_length):\n safe_start = match.end() - max_number_length\n\n a = max(0,safe_start-max_sentence_buffer)\n b = min(len(body),match.end()+max_sentence_buffer)\n # print(a,b)\n\n short_line = body[a:b]\n\n # Append quote initializer to each double newline (markdown newline)\n short_line = '\\n\\n> '.join(short_line.split('\\n\\n'))\n\n if a == 0:\n s_a = ''\n else:\n s_a = '... '\n if b == len(body):\n s_b = ''\n else:\n s_b = ' ...'\n\n lines.append(s_a+short_line+s_b)\n else:\n lines = [body]\n \n quote = [\"> \" + p.sub(r'**\\1**', line) + \"\\n\" for line in lines]\n return quote",
"def initial_quotes(self, text):\n\n quote_finder = re.compile(r\"\"\"\n ( # Start group capture\n (\"|“|&\\#8220;) # A double quote\n | # Or\n ('|‘|&\\#8216;) # A single quote\n ) # End group capture\n \"\"\", re.VERBOSE)\n\n replace_function = lambda match: \"\"\"<span class=\"%s\">%s</span>\"\"\"\\\n % ('dquo' if match.group(2) else 'quo', match.group(1))\n text = quote_finder.sub(replace_function, text, 1) \n \n return text",
"def summon_text(quote_mess):\n\n # clean up to get an individual quote out of the poorly formatted soup\n # goodreads does not format their webpages in a way that is scraping-friendly\n quotes = []\n for quote in quote_mess:\n to_trim = str(quote)\n trimmed = to_trim[30:]\n this_quote = ''\n for char in trimmed:\n if char == '<':\n break\n else:\n this_quote = this_quote + char\n quotes.append(this_quote)\n\n # clean up the line breaks and unnecessary punctuation\n # without this step, we would end up with random punctuation and unreliable chaining\n quote_list = []\n for quote in quotes:\n cleaned = quote.replace(\"\\n\", '')\n purged = re.sub(r'(\\.|,|:|;|\\(|\\)|\\\"|\\?|”|“|!)', '', cleaned)\n quote_list.append(purged)\n\n # create a final clean list of all the words available\n word_list = []\n for index in range(0, len(quote_list) - 1):\n quote = quote_list[index]\n words = quote.split(' ')\n for word in words:\n # just checking if we have a first person word or not\n if word != 'I' or word[:2] != \"I'\":\n word_list.append(word.lower())\n else:\n word_list.append(word)\n\n return word_list",
"def process_body(text):\n # if text != None:\n if text is not None:\n soup = BeautifulSoup(str(text), 'html.parser')\n try:\n soup.find('blockquote').decompose()\n contained_quote = True\n\n except AttributeError:\n contained_quote = False\n\n cleaned = soup.get_text()\n cleaned = unicodedata.normalize(\"NFKD\", cleaned)\n\n return cleaned, contained_quote\n else:\n cleaned = float(\"nan\")\n contained_quote = float(\"nan\")\n return cleaned, contained_quote",
"def test_parse_quotes(self):\n quote = api.parse_quote(\" This is a quote. | Author | Publication | tag1, tag2 , tag3 \",\n simple_format=False)\n self.assertEqual(\"This is a quote.\", quote.quote)\n self.assertEqual(\"Author\", quote.author)\n self.assertEqual(\"Publication\", quote.publication)\n self.assertEqual(3, len(quote.tags))",
"def parts_of_speech_tags(self, tokenized_doc):\n return [(token.text, token.pos_) for token in self.parser(\n tokenized_doc)]",
"def parse_content(self, api):\n abstract = ''\n for tag in api.next_siblings:\n if tag.name == 'hr' or tag.name == 'blockquote':\n break\n elif tag.name == 'div':\n continue\n else:\n abstract+=(str(tag))\n return abstract.strip('\\n')",
"def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"",
"def extract_references(text):\n open = u\"\\u201C\"\n close = u\"\\u201D\"\n undir_quote_strs = re.findall(r'\\\"(.+?)\\\"',text.decode('UTF-8', errors = 'replace'))\n dir_quote_strs=re.findall(r''+open+'(.+?)'+close+'',text.decode('UTF-8',errors='replace'))\n if len(undir_quote_strs) > len(dir_quote_strs):\n return undir_quote_strs #Some documents use undirected quotes\n return dir_quote_strs # Some documents use directed quotes",
"def __yahoo_parse_text(self, content):\n text = ''\n # Process all paragraphs.\n paragraphs = content.find_all('p')\n for par in paragraphs:\n text += '<p>' + par.getText(separator=' ') + '</p>'\n # Remove all extra whitespace (single space remains).\n text = ' '.join(text.strip().split())\n # Result\n return text",
"def __getTagText(self, tag):\n return ''.join(tag.findAll(text=True)).replace(unichr(160), ' ')",
"def tags(self):\n # See also. Sentence.__repr__().\n ch, I,O,B = self.chunk, INSIDE+\"-\", OUTSIDE, BEGIN+\"-\"\n tags = [OUTSIDE for i in range(len(self.sentence.token))]\n for i, tag in enumerate(self.sentence.token): # Default: [WORD, POS, CHUNK, PNP, RELATION, ANCHOR, LEMMA]\n if tag == WORD:\n tags[i] = encode_entities(self.string)\n elif tag == POS and self.type:\n tags[i] = self.type\n elif tag == CHUNK and ch and ch.type:\n tags[i] = (self == ch[0] and B or I) + ch.type\n elif tag == PNP and self.pnp:\n tags[i] = (self == self.pnp[0] and B or I) + \"PNP\"\n elif tag == REL and ch and len(ch.relations) > 0:\n tags[i] = [\"-\".join([str(x) for x in [ch.type]+list(reversed(r)) if x]) for r in ch.relations]\n tags[i] = \"*\".join(tags[i])\n elif tag == ANCHOR and ch:\n tags[i] = ch.anchor_id or OUTSIDE\n elif tag == LEMMA:\n tags[i] = encode_entities(self.lemma or \"\")\n elif tag in self.custom_tags:\n tags[i] = self.custom_tags.get(tag) or OUTSIDE\n return tags",
"def split_tagged_text_into_chunks(text, *a, **kw):\n return split_tagged_text_into_chunks(text, *a, **kw)",
"def extractText(postSoup):\n for tag in postSoup.findAll(True):\n if tag.name in (\"code\"):\n tag.extract()\n else:\n tag.hidden=True\n\n return postSoup.renderContents()",
"def __getText(cls, node):\n\n pieces = []\n code = cls.MARKUP.get(node.tag)\n if code:\n pieces.append(f\"{{\\\\{code} \")\n if node.text is not None:\n pieces.append(fix(node.text))\n for child in node.findall(\"*\"):\n pieces.append(cls.__getText(child))\n if child.tail is not None:\n pieces.append(child.tail)\n if code:\n pieces.append(\"}\")\n return \"\".join(pieces)",
"def tag(self, text):\n\t\tpass",
"def split_tagged_text_into_chunks(text):\n if not sentinel_d.get(\"repatt1\"):\n patt1 = r\"(<t(?:ag)?.*?(?<=/)(?:t(?:ag)?)?>)\"\n sentinel_d.update(\n repatt1=re.compile(patt1, flags=re.IGNORECASE | re.DOTALL)\n )\n return [chunk for chunk in sentinel_d[\"repatt1\"].split(text) if chunk]",
"def extract_text(soup, result):\n if soup:\n for t in soup.children:\n if type(t) == NavigableString:\n # Text content node\n result.append(t)\n elif isinstance(t, NavigableString):\n # Comment, CDATA or other text data: ignore\n pass\n elif t.name in whitespace_tags:\n # Tags that we interpret as whitespace, such as <br> and <img>\n result.append_whitespace()\n elif t.name in block_tags:\n # Nested block tag\n result.begin() # Begin block\n extract_text(t, result)\n result.end() # End block\n elif t.name not in exclude_tags:\n # Non-block tag\n extract_text(t, result)",
"def get_quote_and_movie_name():\n html_content = urlopen(MOVIE_QUOTE_SOURCE).read().decode('utf-8')\n soup = BeautifulSoup(html_content, 'html.parser')\n results = soup.find_all(attrs={'class': 'col-xs-9 col-lg-10'})\n quote_regex = re.compile('<blockquote>(.*?)</blockquote>')\n movie_regex = re.compile('</strong>(.*?)</span>')\n movie_em_regex = re.compile('<em>(.*?)</em>')\n movie_regex_second = re.compile('</strong>(.*?)</a>')\n last_results = []\n\n for result in results:\n\n quote_line = str(result.find('blockquote')).replace('\\n', '')\n quote = quote_regex.findall(quote_line)[0].strip()\n movie_line = str(result.find_all(attrs={'class': 'source'})[0])\n try:\n movie_name = movie_regex.findall(movie_line)[0].strip()\n except:\n movie_name = movie_regex_second.findall(movie_line)[0].strip()\n if '<em>' in movie_name:\n movie_name = movie_em_regex.findall(movie_name)[0].strip()\n\n last_results.append((quote, movie_name))\n\n return random.choice(last_results)",
"def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result",
"def get_text():\n global x\n for i in soup.body(\"aside\", {\"id\": \"text-2\"}):\n x = i.get_text()",
"def __syntax(self, text):\n s = pattern.en.parsetree(text, relations = True, lemmata = True)\n text_chunks = []\n for sentence in s:\n out = \"\"\n for chunk in sentence.chunks:\n out += str(chunk.type)\n text_chunks.append(out)\n text_chunks_out = [\" \".join(text_chunks)]\n return (self.chunk_vectorizer.transform(text_chunks_out),)",
"def _get_text(raw_html):\n bs = BeautifulSoup(raw_html)\n text_nodes = bs.find_all(_is_text_tag)\n text_elements = [_get_child_text(node) for node in text_nodes]\n return ' '.join(chain(*chain(*text_elements)))",
"def collect_content(parent_tag):\n content = ''\n for tag in parent_tag:\n p_tags = tag.find_all('p')\n for tag in p_tags:\n content += tag.text + '\\n'\n return content",
"def parse(cls, path):\n if not cls.can_ingest(path):\n raise Exception('File extension not compatible')\n\n quotes = []\n\n doc = docx.Document(path)\n\n for para in doc.paragraphs:\n if para.text != '':\n parsed = para.text.split(' - ')\n if len(parsed) > 1:\n new_quote = QuoteModel(author=parsed[1], body=parsed[0])\n quotes.append(new_quote)\n\n return quotes"
] | [
"0.6213396",
"0.6097698",
"0.6085885",
"0.6053056",
"0.5988718",
"0.5977862",
"0.5800565",
"0.5786364",
"0.57424855",
"0.57417625",
"0.5605103",
"0.5602675",
"0.55760896",
"0.55452555",
"0.5529984",
"0.5528037",
"0.5527219",
"0.5504466",
"0.5502221",
"0.5453675",
"0.54405576",
"0.54253507",
"0.539358",
"0.5377735",
"0.53770834",
"0.5365872",
"0.53500205",
"0.53428894",
"0.5338937",
"0.5302375"
] | 0.6584478 | 0 |
Increase the number of xor gateways (split + join) | def inc_xor_gateways(self):
self.num_xor_gateways += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def xor(a, b):",
"def __init__(self, width, partition_points):\n super().__init__(width, partition_points, XORCombiner, \"xor\")",
"def xor_network():\n # fmt: off\n tpm = np.array([\n [0, 0, 0],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 0],\n [1, 0, 1],\n [0, 1, 1],\n [0, 0, 0],\n ])\n cm = np.array([\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 0],\n ])\n # fmt: on\n return Network(tpm, cm=cm, node_labels=LABELS[:tpm.shape[1]])",
"def test_execute_xor(new_network):\n network = new_network\n devices = network.devices\n names = devices.names\n\n [SW1_ID, SW2_ID, XOR1_ID, I1, I2] = names.lookup(\n [\"Sw1\", \"Sw2\", \"Xor1\", \"I1\", \"I2\"])\n\n # Make devices\n devices.make_device(XOR1_ID, devices.XOR)\n devices.make_device(SW1_ID, devices.SWITCH, 0)\n devices.make_device(SW2_ID, devices.SWITCH, 0)\n\n # Make connections\n network.make_connection(SW1_ID, None, XOR1_ID, I1)\n network.make_connection(SW2_ID, None, XOR1_ID, I2)\n\n network.execute_network()\n assert new_network.get_output_signal(XOR1_ID, None) == devices.LOW\n\n # Set Sw1 to HIGH\n devices.set_switch(SW1_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.HIGH\n\n # Set Sw2 to HIGH\n devices.set_switch(SW2_ID, devices.HIGH)\n network.execute_network()\n assert network.get_output_signal(XOR1_ID, None) == devices.LOW",
"def xor_inplace(a,b):",
"def double_xor(it):\n\n return [xor(it[2*i:2*i+2]) for i in range(len(it)/2)]",
"def my_xor(a_list, b_list):\n for a, b in zip(a_list, b_list):\n y = int(a, 2) ^ int(b, 2)\n yield ('{0:b}'.format(y)).zfill(len(a))",
"def addition_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #print(binn)\n while len(bina) >= len(binb):\n binb = [0]+binb\n while len(bina) < len(binb)-1:\n bina = [0]+bina\n while len(binn) < len(bina):\n binn = [0]+binn\n while len(binn) > len(bina):\n bina = [0]+bina\n binb = [0]+binb\n binn.reverse()\n bina.reverse()\n binb.reverse()\n #print(bina, binb, binn)\n n = len(bina)+len(binb)+len(binn)\n na = len(bina)\n nab = len(bina)+len(binb)\n q = QuantumRegister(n+2, 'q')\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binb)):\n if binb[i]:\n circ.x(q[na+i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[nab+i])\n addmod(circ, q, # A, B, lost, last, N, lost2, binn):\n [q[i] for i in range(len(bina))],\n [q[i+na] for i in range(len(binb)-1)],\n q[n],\n q[na+len(binb)-1],\n [q[i+nab] for i in range(len(binn))],\n q[n+1],\n binn)\n circ_m = measure(circ, q, [i for i in range(na,nab)])\n return circ_m",
"def __xor__(self, other):\n a, b = Trits.match_length(self, other)\n return Trits([x ^ y for x, y in zip(a, b)])",
"def xorbits(num1,num2):\n thingstoadd = []\n for i in range(31):\n bit1=setbit(num1,i)\n bit2=setbit(num2,i)\n bit1=shiftleft(bit1,31 - i)\n bit2=shiftleft(bit2,31 - i)\n bitsum=add(bit1,bit2)\n bitsum=shiftright(bitsum,31 - i)\n thingstoadd.append(bitsum)\n return sum(thingstoadd)",
"def __xor__(self, other):\r\n if self.field.characteristic == 2:\r\n return runtime.xor(self, other)\r\n\r\n return super().__xor__(other)",
"def ADD_ROUND_KEY(state_array, key_array):\n for i in range(4):\n for j in range(4):\n state_array[i][j] = state_array[i][j] ^ key_array[i][j]\n return state_array",
"def _xorSelection(self):\n\n self._console_output(\"XOR'ing selected bytes...\")\n self.ba.xor_patcher()",
"def inc_para_gateways(self):\r\n self.num_para_gateways += 1",
"def __init__(self, x):\n self.bit = x\n for i in range(len(x)):\n j = i | (i + 1)\n if j < len(x):\n x[j] += x[i]",
"def xor(self):\n\n \"\"\" fisrt i pick element we need to xor each other and put theme in list\"\"\"\n bits_to_xor = []\n for i in self.xor_input:\n bits_to_xor.append(self.state[i])\n\n \"\"\" next xor the list elemet usin reduce with lambda func.\"\"\"\n res = reduce(lambda x, y: x ^ y, bits_to_xor)\n return res",
"def __xor__(self, other):\r\n return self + other - 2 * self * other",
"def test_swap_gate_nondeterministic_waltz_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.swap_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)",
"def break_single_key_xor(b1):\n\n max_score = None\n result_plaintext = None\n key = None\n\n for i in range(256):\n b2 = [i] * len(b1)\n plaintext = bytes(xor(bytearray(b1), b2))\n line_score = score(plaintext)\n\n if line_score > max_score or not max_score:\n max_score = line_score\n result_plaintext = plaintext\n key = chr(i)\n return key, result_plaintext",
"def combinedSwitch(house, district, count, bCursor, temperature, howMany, coolingRate, sa, combiSize = 4):\n currentCosts = district.calculateCosts()\n\n # probeer 10 keer een random combinatie van huizen van dezelfde batterij te vinden\n while howMany < combiSize:\n while bCursor < len(district.batteries)-1:\n b = district.batteries[bCursor]\n if b != house.connection:\n while count < 10:\n lookForMultiSwitch(sa, b, howMany, house, district, currentCosts, \\\n temperature, coolingRate)\n count += 1\n bCursor += 1\n howMany += 1\n\n if howMany < 4:\n howMany += 1\n combinedSwitch(house, district, count, 0, currentCosts, howMany)",
"def __rxor__(self, other):\n return self.runtime.xor(self, other)",
"def test_bit_xor(self):\n value = bytearray([1])\n ops = [bitwise_operations.bit_xor(self.test_bin_ones, 0, 8, 1, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [1] * 4)\n assert bins[self.test_bin_ones] == expected_result",
"def test_bit_xor_multiple_bytes(self):\n value = bytearray([8] * 5)\n ops = [bitwise_operations.bit_xor(self.five_255_bin, 0, 40, 5, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([247] * 5)\n assert bins[self.five_255_bin] == expected_result",
"def XOR_up_to_number(number):\n r = number % 4\n xor = None\n if r is 0:\n xor = number\n elif r is 1:\n xor = 1\n elif r is 2:\n xor = number + 1\n else:\n xor = 0\n return xor",
"def increasing_mutations(nets, probs, desc):\n inverse = [] # After the \"while\", this variable will contain the list of networks in reverse order of \"candidateness\" to be mutated.\n\n # Decide the place in which the mutation is performed\n\n while len(nets) != 0: # While there are networks which have not been added to the inverse list\n net = nets[np.argmax(probs)] # Determine the \"most useless\" network still available\n cands = [net] + desc.comp_by_input(net) + desc.comp_by_output(net) # Candidates to be added to the inverse list (the most useless one and the ones around it, because they dont need more modeling poer, as the \"most useless network appears to be not needed)\n inverse += [x for x in cands if (x not in inverse) and (\"i\" not in x) and (\"o\" not in x)] # Add to inverse list if they are networks and are not wet in\n\n # Update original lists.\n probs = [probs[i] for i in range(len(nets)) if nets[i] not in inverse]\n nets = [nets[i] for i in range(len(nets)) if nets[i] not in inverse]\n\n for comp in reversed(inverse): # Try mutation near the networks according to the previous arrangement (could happen that some places cannot fit mutations).\n\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n _, in_conns, out_conns, _ = desc.get_net_context(comp) # Connections near the selected network (which could be affected by the mutation)\n conns = in_conns + out_conns # Checka si esto da error\n for mutation in np.random.permutation([\"add_con\", \"divide_con\"]): # Try both mutations in case the first one does not work\n res, trainables = mutate(mutation, desc, comp, conns)\n if res != -1:\n return trainables, res, mutation, comp, reaching_outs # If the mutation is successful, return, else try the second mutation or the next network.",
"def makeBinaryChains():\n\t\n\t# retrieve the binding partner specifications\n\t(maxsize,types) = getTypes()\n\t\n\t# Do some basic argument checking for this model\n\tif (len(types) < 2):\n\t\tprint \"Number of defined types must equal two for binary chain calculations.\"\n\t\treturn\n\tif (maxsize == 0):\n\t\tprint \"Must specify a valid maximum number for one or more components.\"\n\t\treturn\n\n\tallChains = []\n\tnewChainsA = [[]]\n\tnewChainsB = []\n\t\n\ttypeA = types[0]\n\ttypeB = types[1]\n\t\n\t# start the chain with a single type A component\n\taddComponent(newChainsA[0],typeA,0,0)\n\n\tdepth = 0\n\tfor n in range(maxsize):\n\t\tdepth+=1\n\t\t\n\t\t# go through all the chains created last iteration and append B components\n\t\tnewChainsB = []\n\t\tfor thisChain in newChainsA:\n\n\t\t\t# get a list of new available sites in the provided chain\n\t\t\t# by setting depth -1, we will only add to components added last round\n\t\t\topenSites = makeSiteList(thisChain,typeB,depth-1)\n\t\t\t\n\t\t\t# make all the descendants from the current chain and append them to the pool\n\t\t\tif (n == 0) and (typeA['sym']): #if the starting binder is symmetric, no need to start chains at all its sites\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,-1)\n\t\t\telse:\n\t\t\t\tnewChainsB = newChainsB + fillSites(openSites,thisChain,typeB,depth)\n\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsB))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsB\n\t\t\n\t\tdepth+=1\n\t\t\n\t\t# add an additional component to all the previously modified chains\n\t\tnewChainsA = []\n\t\tfor thisChain in newChainsB:\n\n\t\t\topenSites = makeSiteList(thisChain,typeA,depth-1)\n\t\t\tnewChainsA = newChainsA + fillSites(openSites,thisChain,typeA,depth)\n\t\t\t\n\t\tprint('n:'+str(n)+', '+str(len(newChainsA))+ ' chains created at depth '+str(depth))\n\t\t\n\t\tallChains = allChains + newChainsA\n\n\treturn allChains",
"def test_bit_xor_multiple_bytes_value_unchanged(self):\n value = bytearray([0])\n ops = [bitwise_operations.bit_xor(self.test_bin_zeroes, 7, 8, 1, value, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 5)\n assert bins[self.test_bin_zeroes] == expected_result",
"def test_swap_gate_deterministic_waltz_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.swap_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='u1,u2,u3,cx')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)",
"def xor(it):\n return 0 if it[0]==it[1] else 1",
"def __ixor__(self, y):\n if is_tensor(y) or isinstance(y, int):\n if self.rank == 0:\n self.share ^= y\n elif isinstance(y, BinarySharedTensor):\n self.share ^= y.share\n else:\n raise TypeError(\"Cannot XOR %s with %s.\" % (type(y), type(self)))\n return self"
] | [
"0.6017773",
"0.5604052",
"0.55942357",
"0.55624527",
"0.5541649",
"0.55383646",
"0.55364746",
"0.55166155",
"0.546064",
"0.5433929",
"0.5414978",
"0.5412313",
"0.5385111",
"0.53839195",
"0.5364891",
"0.5353381",
"0.5341885",
"0.53325903",
"0.53294843",
"0.5320235",
"0.52835536",
"0.5283083",
"0.5278848",
"0.5278403",
"0.5262968",
"0.5241425",
"0.5232424",
"0.5210277",
"0.5204206",
"0.5192455"
] | 0.7862372 | 0 |
Increase the number of tau transitions | def inc_tau_trans(self):
self.num_tau_trans += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _kendall_tau_add(self, len_old: int, diff_pos: int, tau_old: float):\n return 2.0 / (len_old + 1) * (float(diff_pos) / len_old - tau_old)",
"def _kendall_tau_add(self, len_old, diff_pos, tau_old):\n return 2./(len_old+1)*(float(diff_pos)/len_old-tau_old)",
"def tau_turnover(self):\n return self.tau_batch/self.N_trains",
"def cool(self):\n self.t = self.t - 1",
"def increment(self):\n self._deltas += 1",
"def increment_steps(self):\n self.num_steps += 1",
"def timesGoBy(self):\n self.wcount += 1",
"def addTN(self, num=1):\n self.tn += num",
"def simulationTwoDrugsDelayedTreatment(numTrials):\n # TODO",
"def setNumTimeSubSteps(*argv):",
"def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories",
"def up(self, i):\n pass",
"def increment_number_served(self, numbers):\n\t\tself.number_served += numbers",
"def add_tau_task(bpmn, counts):\r\n from pm4py.objects.bpmn.bpmn_graph import BPMN\r\n counts.inc_tau_trans()\r\n tau_name = \"tau_\" + str(counts.num_tau_trans)\r\n tau_task = BPMN.Task(name=tau_name)\r\n bpmn.add_node(tau_task)\r\n counts.append_tau(tau_task)\r\n return bpmn, tau_task, counts",
"def test_tau(self):\n tau_values = [5.0, 15.0, 25.0]\n \n tmax = 50.0\n dt = 0.1\n N = 3\n\n ini_rate = 80.0\n\n nsteps = int_r(tmax/dt)\n\n # reproducible arbitrariness\n np.random.seed(34342)\n\n tutor_out_trace = ini_rate + 20.0*np.random.randn(nsteps, N)\n # have some correlation between reward trace and tutor.out trace\n rho = 0.2\n reward_trace = (rho*(tutor_out_trace[:, 0] - ini_rate)/20.0 +\n (1-rho)*np.random.randn(nsteps))\n \n scaling = None\n\n for crt_tau in tau_values:\n tutor = SimpleNeurons(N, out_fct=lambda i: tutor_out_trace[i])\n reward = MockReward(lambda t: reward_trace[int_r(t/dt)])\n tutor_rule = ReinforcementTutorRule(tutor, reward, tau=crt_tau,\n constrain_rates=False, ini_rate=ini_rate, learning_rate=1.0,\n use_tutor_baseline=False)\n\n sim = simulation.Simulation(tutor, reward, tutor_rule, dt=dt)\n sim.run(tmax)\n\n drates = tutor_rule.rates - ini_rate\n\n # this should be a convolution of tutor_out_trace*reward_trace with an\n # exponential with time constant crt_tau\n # that means that tau*(d/dt)drates + drates must be proportional to it\n expected_rhs = (tutor_out_trace - ini_rate)*np.reshape(reward_trace,\n (-1, 1))\n\n lhs = np.vstack((float(crt_tau)*np.reshape(drates[0, :], (1, -1))/dt,\n (crt_tau/dt)*np.diff(drates, axis=0) + drates[:-1, :]))\n \n # allow scaling to be arbitrary, but *independent of tau*\n if scaling is None:\n mask = (expected_rhs != 0)\n scaling = np.mean(lhs[mask]/expected_rhs[mask])\n\n # scaling shouldn't be negative or zero!\n self.assertGreater(scaling, 1e-9)\n\n mag = np.mean(np.abs(expected_rhs))\n\n self.assertLess(np.max(np.abs(lhs - scaling*expected_rhs)), 1e-6*mag)",
"def increment_number_served(self, increment):\n self.number_served += increment",
"def increment_number_served(self, increment):\n self.number_served += increment",
"def simulate(self, ntrs):\n self.trtimes = list(np.arange(ntrs)*self.expectedtr)",
"def increase_time(self,s):\n self.days += 1\n if self.disease_status > 0:\n self.time_since_infection += 1\n if self.days == 365:\n self.increase_age(s)",
"def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)",
"def addTP(self, num=1):\n self.tp += num",
"def update_lambda_T(self):\n\n # Set rate if we're at the beginning\n if self.vars['step'] <= 1:\n self.vars['lambdaDecayRate'] = self.settings['lambdaDecayRate']\n self.vars['TDecayRate'] = self.settings['TDecayRate']\n\n incremental_lambda = 1 - \\\n (1-self.vars['lambdaDecayRate'])**self.vars['dt']\n incremental_temp = 1-(1-self.vars['TDecayRate'])**self.vars['dt']\n\n # Update lambda\n self.vars['lambda'] -= incremental_lambda*(\n self.vars['lambda']-self.settings[\"lambdaMin\"])\n\n # Update T\n self.vars['T'] -= incremental_temp * \\\n (self.vars['T']-self.settings[\"TMin\"])",
"def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)",
"def num_trials(self):",
"def update_temperature(self):\n self.iteration += 1 \n self.T = self.T0 * 0.9935**self.iteration",
"def setswitchinterval(n): # real signature unknown; restored from __doc__\n pass",
"def ramp_up(self):\n value = self.current_event[\"ramp_up\"][\"value\"]\n self.current_value.append(self.current_value[-1] + value)",
"def tau_plus(self,x,n=50):\n sigma = np.zeros(n,dtype=np.int8)\n for k in range(n):\n if x>=self.rho:\n sigma[k] = 1\n x = self.f1(x)\n else:\n sigma[k] = 0\n x = self.f0(x)\n return sigma",
"def increase(self):\n self.counter[0] += 1\n\n for x in range(len(self.sequences) -1):\n if self.counter[x] == len(self.sequences[x]) + 1:\n self.counter[x] = 0\n self.counter[x+1] += 1",
"def increment(self, inc):\n self.done += inc"
] | [
"0.6267952",
"0.6195915",
"0.5895552",
"0.58890814",
"0.58763903",
"0.5635565",
"0.5554859",
"0.5525284",
"0.5503111",
"0.5492844",
"0.5483372",
"0.5452114",
"0.54350936",
"0.5365188",
"0.5355348",
"0.5350318",
"0.5350318",
"0.53370744",
"0.52750194",
"0.52710396",
"0.5266659",
"0.52450544",
"0.5220466",
"0.5209721",
"0.5209018",
"0.520148",
"0.5195454",
"0.5191903",
"0.51862794",
"0.51861286"
] | 0.81626916 | 0 |
Create a task with the specified label in the BPMN | def add_task(bpmn, counts, label):
from pm4py.objects.bpmn.bpmn_graph import BPMN
task = BPMN.Task(name=label)
bpmn.add_node(task)
return bpmn, task, counts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_task():",
"def create_task(self, name, value):\n pass",
"def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")",
"def create_task(text):\n new_task = Tasks(task_text=text) \n new_task.save()",
"def create_task_description (r, msg) :\n\n task_descr = troy.TaskDescription ()\n task_descr.tag = \"%s\" % r\n task_descr.executable = '/bin/echo'\n task_descr.arguments = ['Hello', msg, r, '!']\n task_descr.working_directory = \"%(home)s/troy_demo\"\n\n return task_descr",
"def create_task(self, name, target, config=None, comment=\"\"):\n\n if not config:\n config = \"Full and fast\"\n\n request = \"\"\"<create_task>\n <name>%s</name>\n <comment>%s</comment>\n <config id=\"%s\"/>\n <target id=\"%s\"/>\n </create_task>\"\"\" % (name, comment, config, target)\n\n return self.make_xml_request(request, xml_result=True).get(\"id\")",
"def add_task_to_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n child_label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [child_label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could use.\" % child_label)\n return\n # get task label from user\n responses = accept_inputs([\"New parent task label\"])\n parent_label = responses[\"New parent task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [parent_label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could use.\" % parent_label)\n return\n # update the task to remove the parent\n query_no_results(\"update task set parent = ? where label = ?\", [parent_label, child_label])\n print(\"Set parent of task with label '%s' to task with label '%s'.\" % (child_label, parent_label))",
"def task(self, name):\n pass",
"def create_task():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories/{1}/tasks\".format(STORED_ID['project_id'], STORED_ID['story_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"description\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['task_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())",
"def tasks_create(self, name, labels, bug, resource_type, resources, image_quality, frame_filter, **kwargs):\n url = self.api.tasks\n data = {'name': name,\n 'labels': labels,\n 'bug_tracker': bug,\n 'image_quality': image_quality,\n 'frame_filter': frame_filter\n }\n response = self.session.post(url, json=data)\n response.raise_for_status()\n response_json = response.json()\n log.info('Created task ID: {id} NAME: {name}'.format(**response_json))\n log.info(str(response.json()))\n self.tasks_data(response_json['id'], resource_type, resources)",
"def newTask(name, description, assigner, id=None, priority=None, submitter_email=None, whose=None):\n if whose:\n user_id = jutdaapi.find_user(whose)\n if not user_id:\n raise ValueError('bad whose assignment: '+str(whose))\n #title = name + ' for: '+assigner.title()\n # that was the old scheme\n title = '('+assigner.title()+') '+name\n\n if priority != None:\n #priority = (int(priority) + 2) / 2\n priority = int(priority)\n RA_queue = 3\n #if assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+assigner+'\"/>'\n if isinstance(id, str):\n description += '<tasktrackermeta id=\"'+id+'\"/>'\n ticket_id = jutdaapi.create_ticket(RA_queue, title, description,\n priority=priority, submitter_email=submitter_email)\n # Is there a race condition here? In this kind of database\n # I would assume not.\n time.sleep(1)\n ticket = jutdaapi.get_detailed_ticket(ticket_id)\n t = ticketToTask(ticket)\n return t",
"def new(self,target,name = \"\", prio = 10, period = 0, time2run = 0):\n newtask = Task(target,name,prio,period, time2run)\n self.taskmap[newtask.tid] = newtask\n self.schedule(newtask)\n return newtask.tid",
"def taskdetail_create(name, tsk, td_id=None):\n return IMPL.taskdetail_create(name, tsk, td_id)",
"def create_label(self, org, name):\n pass",
"def create_task(self, name, task_info=None):\n task = TaskInst(name=name).save()\n has_task_param = {\n 'super_role': SUPER_ROLE.OWNER,\n 'acceptance': ACCEPTANCE.ACCEPT\n }\n self.tasks.connect(task, has_task_param)\n start = StepInst(name='Start', node_type=NODE_TYPE.START, pos_x=-START_END_OFFSET).save()\n end = StepInst(name='End', node_type=NODE_TYPE.END, pos_x=START_END_OFFSET).save()\n task.steps.connect(start)\n task.steps.connect(end)\n task.update(task_info)\n return task",
"def _create_task(self, body, *, task_cls=Task):\n return task_cls(self, body)",
"def create_task(author, title, text, **kwargs):\n mc = MathContent(text=text)\n mc.save()\n task = Task(author=author, name=title, content=mc, **kwargs)\n task.save()\n return task",
"def create_label(self, name: str):\n return create_label(self.api_key, name)",
"def generate_tasks(self, task):",
"def new_task():\n req = request.json\n if 'cmd' in req:\n id = mongo.db.tasks.insert({\n 'cmd' : req['cmd'],\n 'status' : 'Not started'\n })\n\n response = {'id' : str(id)}\n return response",
"def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)",
"def create(profile, cluster, task_definition, started_by=None, count=None):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"taskDefinition\"] = task_definition\n if started_by:\n params[\"startedBy\"] = started_by\n if count:\n params[\"count\"] = count\n return client.run_task(**params)",
"def create(self, task_model):\n raise NotImplementedError()",
"def add_task_to_pbi(tfs_instance, task, pbi_data):\n task['System.AreaId'] = pbi_data['System.AreaId'] # Area Path\n task['System.IterationId'] = pbi_data['System.IterationId'] # Iteration Path\n try:\n new_task = tfs_instance.add_workitem(task,\n pbi_data.id,\n workitem_type=\"Task\") # Add a new task\n except requests.exceptions.HTTPError as error:\n print(f'Oops.. there was an HTTP error: {error}')\n return\n print(f'Task {str(new_task)} was added successfully')",
"def register_new_task(self, task):\n self.all_tasks.add(task)\n print(f\"Task registered in loadbalancer {task.task_id} description {task.description}\")",
"def create(self, label_id):\n data = {\n 'type': 'tagit',\n 'rate_count': 0,\n 'rate_range': 'day',\n 'limit_count': 0,\n 'limit_range': 'day',\n 'schedule': [],\n 'enabled': True,\n 'args': {\n 'sn': label_id,\n 'tag_sn': label_id\n }\n }\n # Yes, it's confusing. the `/actions/` endpoint is used for tags, while\n # the /tags/ endpoint is used for labels.\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.ACTIONS.value,\n params=data\n )",
"def factory(self, taskname, *args, **kwargs):\n import etc\n return str(apply(etc.tasks[taskname], args, kwargs))",
"def create_task(event):\n manager = event.workbench.get_plugin('exopy.tasks')\n dialog = BuilderView(manager=manager,\n parent=event.parameters.get('parent_ui'),\n future_parent=event.parameters.get('future_parent'))\n result = dialog.exec_()\n if result:\n return dialog.config.build_task()\n else:\n return None",
"def register(self, name, taskdef, flags=gdef.TASK_CREATE, userid=None, password=None, logonType=gdef.TASK_LOGON_NONE, ssid=None):\n new_task = Task()\n\n if userid is None: userid = gdef.VARIANT() # Empty variant\n if password is None: password = gdef.VARIANT() # Empty variant\n if ssid is None: ssid = gdef.VARIANT() # Empty variant\n\n self.RegisterTaskDefinition(name, taskdef, flags, userid, password, logonType, ssid, new_task)\n return new_task",
"def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])"
] | [
"0.74706125",
"0.73837596",
"0.67262894",
"0.66487724",
"0.660005",
"0.64948",
"0.6458578",
"0.642163",
"0.6415165",
"0.6370665",
"0.6368903",
"0.627783",
"0.6273318",
"0.6252123",
"0.62488365",
"0.6243665",
"0.62112993",
"0.61936545",
"0.6173977",
"0.61645275",
"0.6154962",
"0.614766",
"0.6146672",
"0.6140021",
"0.61312664",
"0.61118984",
"0.6091757",
"0.60888195",
"0.60876495",
"0.6075248"
] | 0.77884877 | 0 |
Parse the www/template.html and create the content of file lib/htmltemplate/htmlclasses.py | def parse(force=False):
from htmltemplate import WWW_DIR, TEMPLATE_FILE, TEMPLATE_PY
# pylint: disable=duplicate-string-formatting-argument
print("Parse html template")
lines = open(WWW_DIR+TEMPLATE_FILE).readlines()
pyClassFile = open(TEMPLATE_PY,"w")
pyClassFile.write("''' File automatically generated with template.html content '''\nfrom htmltemplate.template import Template \n")
stack = []
for line in lines:
if "<!--" in line:
spl = line.split("<!--")
if ":begin-->" in line:
classname = spl[1].split(":begin-->")[0]
stack.append([classname,"",""])
elif ":end-->" in line:
classname = spl[1].split(":end-->")[0]
if classname != stack[-1][0]:
raise SyntaxError()
classname, text, comment = stack.pop()
attributes, beginTag, endTag, beginFormat, endFormat = findall(r'\%\([A-Za-z_0-9]*\)s',text)
print("Html template update %s"%classname)
classattributes = set()
for attribute in attributes:
classattributes.add(attribute)
comment = comment.rstrip()
pyClassFile.write("""\n%s\n"""%comment)
if beginTag != "":
pyClassFile.write("""begTag%s = b'''%s'''\n"""%(classname,beginTag))
if endTag != "":
pyClassFile.write("""endTag%s = b'''%s'''\n"""%(classname,endTag))
pyClassFile.write("""def %s(*args, **params):\n"""%classname)
pyClassFile.write("""\tself = Template(*(("%s",) + args), **params)\n\n"""%classname)
pyClassFile.write("""\tdef getBegin(self):\n""")
if beginFormat == "":
if beginTag != "":
pyClassFile.write("""\t\tglobal begTag%s\n"""%classname)
pyClassFile.write("""\t\treturn begTag%s\n"""%(classname))
else:
pyClassFile.write("""\t\treturn b''\n""")
else:
pyClassFile.write("""\t\tglobal begTag%s\n"""%classname)
pyClassFile.write("""\t\treturn begTag%s%s(%s)\n"""%(classname, "\x25",beginFormat[:-1]))
pyClassFile.write("""\tself.getBegin = getBegin\n\n""")
pyClassFile.write("""\tdef getEnd(self):\n""")
if endFormat == "":
if endTag != "":
pyClassFile.write("""\t\tglobal endTag%s\n"""%classname)
pyClassFile.write("""\t\treturn endTag%s\n"""%(classname))
else:
pyClassFile.write("""\t\treturn b''\n""")
else:
pyClassFile.write("""\t\tglobal endTag%s\n"""%classname)
pyClassFile.write("""\t\treturn endTag%s%s(%s)\n"""%(classname, "\x25", endFormat[:-1]))
pyClassFile.write("""\tself.getEnd = getEnd\n\n""")
for attribute in classattributes:
if attribute in ["pattern"]:
pyClassFile.write('\tself.{:<12} = params.get("{}", b"*")\n'.format(attribute,attribute))
elif attribute in ["id","name"]:
pyClassFile.write('\tself.{:<12} = params.get("{}", b"%d"%id(self))\n'.format(attribute,attribute))
elif attribute in ["disabled","active"]:
pyClassFile.write('\tself.{:<12} = params.get("{}", False)\n'.format(attribute,attribute))
elif attribute in ["checked"]:
pyClassFile.write('\tself.{:<12} = params.get("{}", True)\n'.format(attribute,attribute))
else:
pyClassFile.write('\tself.{:<12} = params.get("{}", b"")\n'.format(attribute,attribute))
pyClassFile.write('\treturn self\n')
else:
raise SyntaxError()
else:
if line.strip() != "":
if len(stack) >= 1:
stack[-1][1] += line.strip()
stack[-1][2] += "# " +line.lstrip()
pyClassFile.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def html_template_file(self):\n pass",
"def create_page(self, data):\n env = Environment(loader=FileSystemLoader(self.template_folder), trim_blocks=True, lstrip_blocks=True)\n template = env.get_template(self.template_file_name)\n template_vars = {'class_name': self.get_class_name(data['name']), 'page': data}\n output = template.render(template_vars)\n formatted_output = output.encode('utf8').strip()\n file_name = data['name'] + self.get_output_file_type()\n result_html = open(os.path.join(self.output_folder, file_name), 'w')\n result_html.write(formatted_output)\n result_html.close()",
"def template(self):\n output=file(self.src, 'w').write\n output(\"\"\"%s\n<html>\n<head>\n<title>CHANGE ME</title>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=iso-8859-15\">\n<link rel=\"STYLESHEET\" href=\"%s\" type=\"text/css\">\n</head>\n<body>\n\n<!--it-->\n <p>\n Pagina non disponibile in questa lingua!\n <FORM><INPUT TYPE=\"button\" VALUE=\"Indietro\" onClick=\"history.go(-1);return true;\"> </FORM>\n </p>\n<!--/it-->\n\n<!--en-->\n <p>\n Page not available in this language!\n <FORM><INPUT TYPE=\"button\" VALUE=\"Back\" onClick=\"history.go(-1);return true;\"> </FORM>\n </p>\n<!--/en-->\n\n</body>\n</html>\n\"\"\" % (self.doctype, self.css))",
"def __init__(self, website):\n self.website = website\n if website.paths.__ is None:\n self.defaults = {}\n self.master = None\n else:\n path = os.path.join(website.paths.__, 'etc', 'simplate.html')\n self.path = path\n if os.path.isfile(path):\n msg = message_from_file(open(self.path))\n body = msg.get_payload().decode(self.charset)\n self.defaults = dict()\n for key, val in msg.items():\n key = key.decode(self.charset)\n val = val.decode(self.charset)\n if not is_valid_identifier(key):\n raise BadKey(key, path)\n self.defaults[key] = val\n self.master = Template(body)\n else:\n self.defaults = {}\n self.master = None",
"def __init__(self,\n source_path='./*.py',\n template_path='./docs/templates/*_template.md',\n output_path='./docs/documentation.md',\n ignore=['extra']\n ):\n\n template_files = glob.glob(template_path)\n # filename = t.split('/')[-1]\n self.sources = {os.path.basename(s).split('.')[0]: os.path.normpath(s) for s in glob.glob(source_path) if not any(i in s for i in ignore)}\n self.templates = {os.path.basename(t).split('_')[0]: os.path.normpath(t) for t in template_files}\n self.output_path = output_path\n\n self.template_content = {}\n for k, v in self.templates.items():\n path = v\n with open(path, 'r') as template_file:\n self.template_content[k] = template_file.read()\n\n self.text = ''\n self.classes = []\n self.headers = ['Params', 'Returns', 'Attributes']\n self.hierarchy = [\n 'class',\n 'method',\n 'parameter',\n 'pinfo',\n 'extra'\n ]\n self.tab_length = 6",
"def htAccessTmplContent( self, pars, directory ):\n\n return None",
"def render_template(self, template_path, vars=None):\n\n content = {'plain':'', 'html':''}\n\n with open(template_path, 'r') as h:\n template = Template(h.read())\n html = template.render(vars)\n\n soup = BeautifulSoup(html, \"html.parser\")\n\n # Generating plain text source from html source\n # The style tag and .link_button are removed\n for style in soup.select('style,.link_button'):\n style.extract()\n\n # Only keep the text inside the tags\n plain = ''.join(soup.findAll(text=True)).strip()\n\n content['html'] = html\n content['plain'] = plain\n\n return content",
"def create_html(text, template, output):\n\n # TODO uncomment this for orginal DMP format (right now difficult with differing section sizes)\n #templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates/new\")\n templateLoader = jinja2.FileSystemLoader(searchpath=\"../templates\")\n templateEnv = jinja2.Environment(loader=templateLoader)\n TEMPLATE_FILE = \"template_\" + template.lower() + \".html\"\n real_template = templateEnv.get_template(TEMPLATE_FILE)\n\n outputText = real_template.render(contact=text)\n html_file = open(output + \".html\", \"w\")\n html_file.write(outputText)\n html_file.close()\n\n return output + \".html\"",
"def write_template_body1(template_filename):\n template_type = template_filename.split('/')[-1].split('_')[0]\n template_file = open(template_filename, 'a')\n template_file.write('<body>\\n') \n template_file.write('<div id=\"pageTitle\">\\n')\n template_file.write('<?php echo $stat_title; ?>\\n') \n template_file.write('</div>\\n')\n template_file.write('<div class=\"page-menu\"><div class=\"table\">\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Basin:</span>\\n')\n template_file.write(\n ' <select id=\"maptype\" '\n +'onchange=\"changeMaptype(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(' <span class=\"bold\">Name:</span>\\n')\n template_file.write(\n ' <select id=\"domain\" '\n +'onchange=\"changeDomain(this.value);\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write(' <div class=\"element\">\\n')\n template_file.write(\n ' <span class=\"bold\">Forecast Lead:</span>\\n'\n )\n template_file.write(\n ' <select id=\"variable\" '\n +'onchange=\"changeVariable(this.value)\"></select>\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div></div>\\n')\n template_file.write('\\n')\n template_file.write('<!-- Middle menu -->\\n')\n template_file.write('<div class=\"page-middle\" id=\"page-middle\">\\n')\n template_file.write(\n 'Left/Right arrow keys = Change forecast lead | Up/Down arrow keys '\n +'= Change Storm\\n'\n )\n template_file.write(\n '<br>For information on tropical cyclone verification, '\n +'<button class=\"infobutton\" id=\"myBtn\">click here</button>\\n'\n )\n template_file.write('<div id=\"myModal\" class=\"modal\">\\n')\n template_file.write(' <div class=\"modal-content\">\\n')\n template_file.write(' <span class=\"close\">×</span>\\n')\n template_file.write(' Tropical Cyclone Verification Information\\n')\n template_file.write(\n ' <embed width=100% height=100% src=\"../main.php\">\\n'\n )\n template_file.write(' </div>\\n')\n template_file.write('</div>\\n')\n template_file.write('<!-- /Middle menu -->\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write(\n '<div id=\"loading\"><img style=\"width:100%\" '\n +'src=\"../../images/loading.png\"></div>\\n'\n )\n template_file.write('\\n')\n template_file.write('<!-- Image -->\\n')\n template_file.write('<div id=\"page-map\">\\n')\n template_file.write(' <image name=\"map\" style=\"width:100%\">\\n')\n template_file.write('</div>\\n')\n template_file.write('\\n')\n template_file.write('<script type=\"text/javascript\">\\n')\n template_file.write('// Get the modal\\n')\n template_file.write('var modal = document.getElementById(\"myModal\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the button that opens the modal\\n')\n template_file.write('var btn = document.getElementById(\"myBtn\");\\n')\n template_file.write('\\n')\n template_file.write('// Get the <span> element that closes the modal\\n')\n template_file.write(\n 'var span = document.getElementsByClassName(\"close\")[0];\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks the button, open the modal\\n'\n )\n template_file.write('btn.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"block\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks on <span> (x), close the modal\\n'\n )\n 
template_file.write('span.onclick = function() {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write('}\\n')\n template_file.write('\\n')\n template_file.write(\n '// When the user clicks anywhere outside of the modal, close it\\n'\n )\n template_file.write('window.onclick = function(event) {\\n')\n template_file.write(' if (event.target == modal) {\\n')\n template_file.write(' modal.style.display = \"none\";\\n')\n template_file.write(' }\\n')\n template_file.write('}\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//User-defined variables\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('//Global variables\\n')\n template_file.write(\n 'var minFrame = 0; //Minimum frame for every variable\\n'\n )\n template_file.write(\n 'var maxFrame = 26; //Maximum frame for every variable\\n'\n )\n template_file.write(\n 'var incrementFrame = 1; //Increment for every frame\\n'\n )\n template_file.write('\\n')\n template_file.write('var startFrame = 0; //Starting frame\\n')\n template_file.write('\\n')\n template_file.write('var cycle = 2018100600\\n')\n template_file.write('\\n')\n template_file.write('/*\\n')\n template_file.write(\n 'When constructing the URL below, DDD = domain, VVV = variable, '\n +'LLL = level, SSS = season, Y = frame number.\\n'\n )\n template_file.write(\n 'For X and Y, labeling one X or Y represents an integer '\n +'(e.g. 0, 10, 20). Multiple of these represent a string\\n'\n )\n template_file.write(\n 'format (e.g. XX = 00, 06, 12 --- XXX = 000, 006, 012).\\n'\n )\n template_file.write('*/\\n')\n template_file.write(\n 'var url = \"<?php echo $'+template_type+'_url; ?>\";\\n'\n )\n template_file.write('\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('//Add variables & domains\\n')\n template_file.write(\n '//======================================================='\n +'=============================================\\n'\n )\n template_file.write('\\n')\n template_file.write('var variables = [];\\n')\n template_file.write('var domains = [];\\n')\n template_file.write('var levels = [];\\n')\n template_file.write('var seasons = [];\\n')\n template_file.write('var maptypes = [];\\n')\n template_file.write('var validtimes = [];\\n')\n template_file.write('\\n')\n template_file.write('\\n')\n template_file.close()",
"def __init__(self,template_file, **kwargs):\r\n \r\n env = Environment(\r\n loader=PackageLoader('email_generator', 'templates'),\r\n autoescape=select_autoescape(['html', 'xml'])\r\n )\r\n template = env.get_template(template_file)\r\n self.body = template.render(**kwargs)",
"def __init__(self, template):\n\n self.template = template\n self.parsed_template = {}",
"def parse_html(self):\n if self.file_extension == '.czm': # Caso de fichero comprimido czm.\n folder_path = extract_file(self.input_file) # Descomprime el archivo de entrada.\n self.html_path = find_extension(folder_path, '.html') # Busca el html en el directorio de extracción.\n else: # Caso de html proporcionado directamente.\n self.html_path.append(self.input_file)\n if not self.html_path: # En caso de que no exista ningún html.\n raise IOError('html file not found.')\n for path in self.html_path: # Almacena cada uno de los html parseados en un diccionario.\n html_file = open(path, encoding=\"utf8\") # Almacena los datos del html.\n parsed_html = BeautifulSoup(html_file, \"lxml\") # Hay que instalar lxml.\n self.parsed_html_dic.update({os.path.splitext(os.path.basename(path))[0]:parsed_html})",
"def _load_template(name: str) -> str:\n html_tpl = _read_text(name + '.html')\n import re\n\n # line breaks are not needed\n html_tpl = html_tpl.replace('\\n', '')\n # remove comments\n html_tpl = re.sub(r'<!--(.|\\s|\\n)*?-->', '', html_tpl)\n # remove space around special characters\n html_tpl = re.sub(r'\\s*([><])\\s*', r'\\1', html_tpl)\n return html_tpl",
"def __init__(self, html, out_file):\n self.html = html\n self.out_file = out_file\n self.env = Environment(loader=PackageLoader('html2docx', 'templates'))\n self.template_names = {\n 'content_types': '[Content_Types].xml',\n 'apps': 'docProps/app.xml',\n 'core': 'docProps/core.xml',\n 'rels': '_rels/.rels',\n 'document': 'word/document.xml',\n 'fonts': 'word/fontTable.xml',\n 'document_rels': 'word/_rels/document.xml.rels',\n 'settings': 'word/settings.xml',\n 'styles': 'word/styles.xml',\n }\n self.document_state = []\n self.visited = set()",
"def _parse(\n self, source: str, name: t.Optional[str], filename: t.Optional[str]\n ) -> nodes.Template:\n return Parser(self, source, name, filename).parse()",
"def read_html_template(resume_template_file):\n\n # CREATE VARIABLE RESUME OUTPUT TO STORE HTML CODE\n resume_output = []\n\n # opens resume template html file\n with open(resume_template_file, \"r\") as fin:\n template = list(fin.readlines())\n\n # debugging\n # print(\"template:\", template)\n\n # strips the trailing spaces from each of the lines in template\n for line in template:\n line = line.replace('\\n', '')\n # and saves to the variable resume output\n resume_output.append(line)\n\n # debugging\n # print(\"resume output after read_html:\", resume_output)\n\n # returns output code\n return resume_output",
"def create_html(self):\n # Add html content to the self.doc\n self.doc.asis('<!DOCTYPE html>')\n with self.tag('html'):\n self.design_header()\n self.design_body()\n # Write html content from self.doc\n with codecs.open(self.filestream.name, 'w', 'utf-8') as f:\n html_content = indent(\n self.doc.getvalue(),\n indentation=' ',\n newline='\\r\\n'\n )\n f.write(html_content)",
"def get_html_parts(self):\n script_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n 'data')\n with open(os.path.join(script_path, 'head.html'), 'r') as hfile:\n self.header = hfile.read()\n with open(os.path.join(script_path, 'template.html'), 'r') as hfile:\n self.template = hfile.read()\n with open(os.path.join(script_path, 'footer.html'), 'r') as hfile:\n self.footer = hfile.read()\n self.module_icon = os.path.join(script_path, 'icon.png')\n return True",
"def create_page():\n with open('d3mcnulty2.html', 'r') as home:\n return home.read()",
"def build(self):\n self.logger.debug(\"run\")\n\n self.onInit()\n self.work()\n \n self.afterWork()\n\n template = Templateengine(self.currenttemplate)\n template.readTemplateFile()\n contenttype = self.settings.contenttype \n self.defaultTemplateParameter()\n \n try:\n self.content = template.get(self.tplparam)\n except Exception as ex:\n Emergency.stop(ex)\n\n self.onDone()\n \n self.logger.debug(\"done\")",
"def __init__(self, html_contents):\n self.doc = html.document_fromstring(html_contents)",
"def __html__(self, file_path:str):\n raise NotImplementedError",
"def _genspider(self, module, name, domain, template_name, template_file):\n tvars = {\n 'project_name': settings.get('BOT_NAME'),\n 'ProjectName': string_camelcase(settings.get('BOT_NAME')),\n 'module': module,\n 'name': name,\n 'domain': domain,\n 'classname': '%sSpider' % ''.join([s.capitalize() \\\n for s in module.split('_')])\n }\n\n spiders_module = __import__(settings['NEWSPIDER_MODULE'], {}, {}, [''])\n spiders_dir = abspath(dirname(spiders_module.__file__))\n spider_file = \"%s.py\" % join(spiders_dir, module)\n\n shutil.copyfile(template_file, spider_file)\n render_templatefile(spider_file, **tvars)\n print \"Created spider %r using template %r in module:\" % (name, \\\n template_name)\n print \" %s.%s\" % (spiders_module.__name__, module)",
"def define_content(self, html):\n self.html_template(html, lang=\"en\")\n self.add_language(\"en\")",
"def render(self, template_name, **kwargs):\n currentUser = self.current_user\n from_workspace_str = self.get_argument(\"from_workspace\", default=\"0\", strip=False)\n from_workspace = from_workspace_str == \"1\"\n html = self.render_string(template_name, currentUser=currentUser, from_workspace = from_workspace, **kwargs)\n if from_workspace :\n scriptName = self.__class__.__name__\n\n if scriptName.endswith('Handler') :\n scriptName = scriptName[:-7] \n\n path = self.static_url('scripts/' + scriptName + '.js')\n\n js = '<script src=\"' + escape.xhtml_escape(path) + '\" type=\"text/javascript\"></script>'\n html = html + utf8(js)\n self.finish(html)\n return\n\n # Insert the additional JS and CSS added by the modules on the page\n js_embed = []\n js_files = []\n css_embed = []\n css_files = []\n html_heads = []\n html_bodies = []\n for module in getattr(self, \"_active_modules\", {}).values():\n embed_part = module.embedded_javascript()\n if embed_part:\n js_embed.append(utf8(embed_part))\n file_part = module.javascript_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n js_files.append(file_part)\n else:\n js_files.extend(file_part)\n embed_part = module.embedded_css()\n if embed_part:\n css_embed.append(utf8(embed_part))\n file_part = module.css_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n css_files.append(file_part)\n else:\n css_files.extend(file_part)\n head_part = module.html_head()\n if head_part:\n html_heads.append(utf8(head_part))\n body_part = module.html_body()\n if body_part:\n html_bodies.append(utf8(body_part))\n\n def is_absolute(path):\n return any(path.startswith(x) for x in [\"/\", \"http:\", \"https:\"])\n if js_files:\n # Maintain order of JavaScript files given by modules\n paths = []\n unique_paths = set()\n for path in js_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n js = ''.join('<script src=\"' + escape.xhtml_escape(p) +\n '\" type=\"text/javascript\"></script>'\n for p in paths)\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + utf8(js) + b'\\n' + html[sloc:]\n if js_embed:\n js = b'<script type=\"text/javascript\">\\n//<![CDATA[\\n' + \\\n b'\\n'.join(js_embed) + b'\\n//]]>\\n</script>'\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + js + b'\\n' + html[sloc:]\n if css_files:\n paths = []\n unique_paths = set()\n for path in css_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n css = ''.join('<link href=\"' + escape.xhtml_escape(p) + '\" '\n 'type=\"text/css\" rel=\"stylesheet\"/>'\n for p in paths)\n hloc = html.index(b'</head>')\n html = html[:hloc] + utf8(css) + b'\\n' + html[hloc:]\n if css_embed:\n css = b'<style type=\"text/css\">\\n' + b'\\n'.join(css_embed) + \\\n b'\\n</style>'\n hloc = html.index(b'</head>')\n html = html[:hloc] + css + b'\\n' + html[hloc:]\n if html_heads:\n hloc = html.index(b'</head>')\n html = html[:hloc] + b''.join(html_heads) + b'\\n' + html[hloc:]\n if html_bodies:\n hloc = html.index(b'</body>')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n self.finish(html)",
"def __init__(self, file):\n self.HTML = \"\"\n self.FILE = file\n self.load_data()",
"def __init__(self, params):\n\n try:\n f = open(\"html/navigation.html\")\n self.content = f.read()\n f.close()\n except Exception, e:\n print \"Failed to find HTML template.\"\n sys.exit()\n self.filter = params",
"def html(template, **data):\n tmpl = template_loader.load(template)\n context = {}\n context_setup.dispatch(context)\n context.update(data)\n stream = tmpl.generate(**context)\n return stream",
"def generateHtml(self, tokens, html, css):\n\n\t\tf = open(html, \"w\")\n\t\tf.write(\"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n<title>Document</title>\n</head>\n\t\t\"\"\")\n\t\tif os.path.exists(\"css/default.css\"):\n\t\t\tstyle = open(\"css/default.css\", \"r\").read()\n\t\telse:\n\t\t\tstyle = open(f\"{css}css/default.css\", \"r\").read()\n\t\tf.write(f\"<style>\\n{style}\\n</style>\\n\")\n\t\tf.write(\"<body>\")\n\t\tf.write('<div class=\"markdown-body\">')\n\t\tfor t in tokens:\n\t\t\tf.write(t.html)\n\t\tf.write(\"</div>\")\n\t\tf.write(\"</body>\")\n\t\tf.write(\"</html>\")\n\t\tf.close()",
"def html_builder(s, meta, template_path):\n s = convert_text(s)\n \n # create right navigation panel: \n right = toc_panel(s)\n \n with open(template_path, 'r') as html:\n contents = html.read()\n\n soup = BeautifulSoup(contents, 'lxml')\n\n \n right_div = soup.find(id='sidebar-wrapper')\n book_main = soup.find(id='content')\n metadata = soup.find(id='metadata-content')\n\n for key, value in meta.items():\n\n new_p = soup.new_tag(\"label\")\n value = key + \": \" + value\n new_p.append((value)) \n metadata.insert(0, new_p)\n \n soup.new_tag(\"div\", right_div.append(BeautifulSoup(right, 'lxml')))\n soup.new_tag(\"div\", book_main.insert(1, BeautifulSoup(s, 'html.parser')))\n \n # format main text as html:\n \n full_html = soup\n return str(full_html)"
] | [
"0.683075",
"0.64649874",
"0.63023174",
"0.6233896",
"0.6180485",
"0.6096842",
"0.6082805",
"0.6066677",
"0.606245",
"0.6048339",
"0.60287607",
"0.6013675",
"0.6008966",
"0.59312356",
"0.5891936",
"0.589168",
"0.5890535",
"0.5880855",
"0.58584857",
"0.58117074",
"0.5795981",
"0.5785086",
"0.5754053",
"0.5728523",
"0.5722051",
"0.57071316",
"0.5693298",
"0.56848884",
"0.56471026",
"0.56448346"
] | 0.7633674 | 0 |
Computes labels and inertia using a full distance matrix. This will overwrite the 'distances' array in-place. | def _labels_inertia_precompute_dense(norm, X, sample_weight, centers, distances):
n_samples = X.shape[0]
if norm == 'L2':
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
elif norm == 'L1':
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='manhattan')
else: # pragma no cover
raise NotImplementedError(
f"Not implemented for norm '{norm}'.")
# cython k-means code assumes int32 inputs
labels = labels.astype(numpy.int32, copy=False)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = (mindist * sample_weight).sum()
return labels, inertia | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _assign_labels_array(X, sample_weight, x_squared_norms, centers,\n labels, distances):\n n_clusters = centers.shape[0]\n n_samples = X.shape[0]\n store_distances = 0\n inertia = 0.0\n\n dtype = numpy.float32 if centers.dtype == numpy.float32 else numpy.float64\n center_squared_norms = numpy.zeros(n_clusters, dtype=dtype)\n\n if n_samples == distances.shape[0]:\n store_distances = 1\n\n for center_idx in range(n_clusters):\n center_squared_norms[center_idx] = numpy.dot(\n centers[center_idx, :], centers[center_idx, :])\n\n for sample_idx in range(n_samples):\n min_dist = -1\n for center_idx in range(n_clusters):\n dist = 0.0\n # hardcoded: minimize euclidean distance to cluster center:\n # ||a - b||^2 = ||a||^2 + ||b||^2 -2 <a, b>\n dist += numpy.dot(X[sample_idx, :], centers[center_idx, :])\n dist *= -2\n dist += center_squared_norms[center_idx]\n dist += x_squared_norms[sample_idx]\n dist *= sample_weight[sample_idx]\n if min_dist == -1 or dist < min_dist:\n min_dist = dist\n labels[sample_idx] = center_idx\n\n if store_distances:\n distances[sample_idx] = min_dist\n inertia += min_dist\n\n return inertia",
"def calculate_all_distances_to_center(self):\n all_distances = pd.DataFrame()\n for label in np.unique(self.embedding_df['cluster']): \n distance_df = self.calculate_distances_for_cluster(label)\n all_distances = pd.concat([all_distances, distance_df])\n \n self.embedding_df = self.embedding_df.merge(all_distances, left_index=True, right_index=True)",
"def _labels_inertia_skl(X, sample_weight, x_squared_norms, centers,\n distances=None):\n n_samples = X.shape[0]\n sample_weight = _check_sample_weight(sample_weight, X)\n # set the default value of centers to -1 to be able to detect any anomaly\n # easily\n labels = numpy.full(n_samples, -1, numpy.int32)\n if distances is None:\n distances = numpy.zeros(shape=(0,), dtype=X.dtype)\n # distances will be changed in-place\n if issparse(X):\n inertia = _assign_labels_csr(\n X, sample_weight, x_squared_norms, centers, labels,\n distances=distances)\n else:\n inertia = _assign_labels_array(\n X, sample_weight, x_squared_norms, centers, labels,\n distances=distances)\n return labels, inertia",
"def update_distances(self, pool_features, labelled_features, reset_dist=False):\n if reset_dist:\n self.min_distances = None\n # Update min_distances for unlabelled examples given new cluster center.\n dist = pairwise_distances(pool_features, labelled_features, metric='euclidean', force_all_finite=True)\n if self.min_distances is None:\n self.min_distances = np.min(dist, axis=1).reshape(-1, 1)\n else:\n self.min_distances = np.minimum(self.min_distances, dist)",
"def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist",
"def _assign_labels_csr(X, sample_weight, x_squared_norms, centers,\n labels, distances):\n if (distances is not None and\n distances.shape != (X.shape[0], )):\n raise ValueError( # pragma: no cover\n f\"Dimension mismatch for distance got \"\n f\"{distances.shape}, expecting \"\n f\"{(X.shape[0], centers.shape[0])}.\")\n n_clusters = centers.shape[0]\n n_samples = X.shape[0]\n store_distances = 0\n inertia = 0.0\n\n if centers.dtype == numpy.float32:\n center_squared_norms = numpy.zeros(n_clusters, dtype=numpy.float32)\n else:\n center_squared_norms = numpy.zeros(n_clusters, dtype=numpy.float64)\n\n if n_samples == distances.shape[0]:\n store_distances = 1\n\n for center_idx in range(n_clusters):\n center_squared_norms[center_idx] = numpy.dot(\n centers[center_idx, :], centers[center_idx, :])\n\n for sample_idx in range(n_samples):\n min_dist = -1\n for center_idx in range(n_clusters):\n dist = 0.0\n # hardcoded: minimize euclidean distance to cluster center:\n # ||a - b||^2 = ||a||^2 + ||b||^2 -2 <a, b>\n dist += X[sample_idx, :] @ centers[center_idx, :].reshape((-1, 1))\n dist *= -2\n dist += center_squared_norms[center_idx]\n dist += x_squared_norms[sample_idx]\n dist *= sample_weight[sample_idx]\n if min_dist == -1 or dist < min_dist:\n min_dist = dist\n labels[sample_idx] = center_idx\n if store_distances:\n distances[sample_idx] = dist\n inertia += min_dist\n\n return inertia",
"def _labels_inertia(self, X, centers):\n n_samples = X.shape[0]\n temp_centers = np.array(centers[:])\n dist = cdist(X, temp_centers.reshape(len(temp_centers)/X.shape[1], X.shape[1]), 'euclidean')\n labels = dist.argmin(axis=1)\n inertia = np.sum(np.min(dist, axis=1))\n\n return labels, inertia",
"def _labels_inertia(self, X, centers):\n n_samples = X.shape[0]\n temp_centers = np.array(centers[:])\n dist = cdist(X, temp_centers.reshape(len(temp_centers)/X.shape[1], X.shape[1]), 'euclidean')\n labels = dist.argmin(axis=1)\n inertia = np.sum(np.min(dist, axis=1))\n\n return labels, inertia",
"def cdistance(self, distances):\n self.distanceMatrix = distances\n self.dataChange()",
"def drive_distance_all(distances, motors):\n return null",
"def _data_labels_distance(self, samples, tfidf_dict, distance_metric='cosine'):\n \n def distance_fn(x):\n return sklearn.metrics.pairwise.pairwise_distances(\n x, x[0], metric=distance_metric).ravel() * 100\n\n base_doc_vector = np.fromiter(tfidf_dict.values(),float)\n base_doc_keys = list(tfidf_dict.keys())\n vectors = [base_doc_vector]\n for sample in samples:\n sample_vector = np.zeros(len(base_doc_keys))\n for token in sample.split():\n token_index = base_doc_keys.index(token)\n sample_vector[token_index] = base_doc_vector[token_index]\n vectors.append(sample_vector)\n\n\n distances = distance_fn(sp.sparse.csr_matrix(vectors))\n return np.array(vectors), distances",
"def normalize_labels(labels):\n number_of_labels = len(labels)\n number_of_species = get_number_of_species()\n labels_norm = np.zeros(shape=(number_of_labels, number_of_species))\n for i in range(number_of_labels):\n for label in labels[i]:\n labels_norm[i][label] = 1\n return labels_norm",
"def moment_of_inertia(input, labels, index = None):\n\tinput = numpy.asarray(input)\n\tif labels == None:\n\t\traise RuntimeError, 'labels are needed'\n\tif labels.shape != input.shape:\n\t\traise RuntimeError, 'input and labels shape are not equal'\n\tmoments = []\n\tfor label in scipy.ndimage.find_objects(labels):\n\t\tsubmask = input[label].copy()\n\t\tmoment = _moment(submask)\n\t\tmoments.append(moment)\n\treturn moments",
"def format_distance_matrix(labels, data):\r\n return format_matrix(data, labels, labels)",
"def normalize_distancematrix(self):\n INF = self.distmat.max().max()\n df = self.distmat.fillna(INF)\n self.distmat = (df - df.min()) / (df.max() - df.min())",
"def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)",
"def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance",
"def full_matrix(ops, mut):\n \n index_mat = np.ones((len(ops),len(ops)))\n pairs = np.argwhere(np.triu(index_mat)==1)\n dist_mat = np.zeros((len(ops),len(ops)))\n distances = []\n labels = []\n\n for pair in pairs:\n mi, label = mut.distance(ops[pair[0]], ops[pair[1]])\n distances.append(mi)\n labels.append(label)\n with ProgressBar():\n distances = dask.compute(*distances)\n\n for i in range(len(labels)):\n mut.memo[labels[i]] = distances[i]",
"def F_mat(self, distances):\n distances_norm2 = norm2(distances)\n distances_norm = np.sqrt(distances_norm2)\n isColliding = self.isColliding(distances_norm)[:, :, :, None]\n\n # Repulsion force when a collision happens\n f_colliding = (2/self.d_coll**2)*isColliding\n \n # Interaction force\n ident = np.identity(np.shape(distances)[1])[None, :, :]\n d = (ident+distances_norm)\n dn = (d**self.n)[:, :, :, None]\n d2 = (ident+distances_norm2)\n d2n = (d2**(self.n+1))[:, :, :, None]\n\n f_interact = self.n*self.d_attr**self.n*(self.d_attr**self.n-dn)/(d2n + 10e-50)*(1-isColliding)\n\n # Total Force\n f = (f_colliding + f_interact)*distances\n\n # Remove self-interaction\n diag = np.einsum('ijj->ij', f[:, :, :, 0])\n diag[:, :] = 0\n\n diag2 = np.einsum('ijj->ij', f[:, :, :, 1])\n diag2[:, :] = 0\n\n return f",
"def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()",
"def update_agent_distances_vector(self):\n count = 0\n for agent in self.agents:\n agent_loc = agent.getz()\n\n for i, each_task in enumerate(self.tasks):\n dist = euclid_dist(agent_loc, each_task.getloc())\n self.agent_distances[count][i] = dist\n count += 1\n if self.DEBUG:\n print(self.agent_distances)",
"def _compute(self):\n\n super(ConfusionMatrix, self)._compute()\n\n if __debug__:\n if not self.__matrix is None:\n debug(\"LAZY\",\n \"Have to recompute %s#%s\" \\\n % (self.__class__.__name__, id(self)))\n\n\n # TODO: BinaryClassifier might spit out a list of predictions for each\n # value need to handle it... for now just keep original labels\n try:\n # figure out what labels we have\n labels = \\\n list(reduce(lambda x, y: x.union(Set(y[0]).union(Set(y[1]))),\n self.sets,\n Set(self.__labels)))\n except:\n labels = self.__labels\n\n # Check labels_map if it was provided if it covers all the labels\n labels_map = self.__labels_map\n if labels_map is not None:\n labels_set = Set(labels)\n map_labels_set = Set(labels_map.values())\n\n if not map_labels_set.issuperset(labels_set):\n warning(\"Provided labels_map %s is not coherent with labels \"\n \"provided to ConfusionMatrix. No reverse mapping \"\n \"will be provided\" % labels_map)\n labels_map = None\n\n # Create reverse map\n labels_map_rev = None\n if labels_map is not None:\n labels_map_rev = {}\n for k,v in labels_map.iteritems():\n v_mapping = labels_map_rev.get(v, [])\n v_mapping.append(k)\n labels_map_rev[v] = v_mapping\n self.__labels_map_rev = labels_map_rev\n\n labels.sort()\n self.__labels = labels # store the recomputed labels\n\n Nlabels, Nsets = len(labels), len(self.sets)\n\n if __debug__:\n debug(\"CM\", \"Got labels %s\" % labels)\n\n # Create a matrix for all votes\n mat_all = N.zeros( (Nsets, Nlabels, Nlabels), dtype=int )\n\n # create total number of samples of each label counts\n # just for convinience I guess since it can always be\n # computed from mat_all\n counts_all = N.zeros( (Nsets, Nlabels) )\n\n # reverse mapping from label into index in the list of labels\n rev_map = dict([ (x[1], x[0]) for x in enumerate(labels)])\n for iset, set_ in enumerate(self.sets):\n for t,p in zip(*set_[:2]):\n mat_all[iset, rev_map[p], rev_map[t]] += 1\n\n\n # for now simply compute a sum of votes across different sets\n # we might do something more sophisticated later on, and this setup\n # should easily allow it\n self.__matrix = N.sum(mat_all, axis=0)\n self.__Nsamples = N.sum(self.__matrix, axis=0)\n self.__Ncorrect = sum(N.diag(self.__matrix))\n\n TP = N.diag(self.__matrix)\n offdiag = self.__matrix - N.diag(TP)\n stats = {\n '# of labels' : Nlabels,\n 'TP' : TP,\n 'FP' : N.sum(offdiag, axis=1),\n 'FN' : N.sum(offdiag, axis=0)}\n\n stats['CORR'] = N.sum(TP)\n stats['TN'] = stats['CORR'] - stats['TP']\n stats['P'] = stats['TP'] + stats['FN']\n stats['N'] = N.sum(stats['P']) - stats['P']\n stats[\"P'\"] = stats['TP'] + stats['FP']\n stats[\"N'\"] = stats['TN'] + stats['FN']\n stats['TPR'] = stats['TP'] / (1.0*stats['P'])\n # reset nans in TPRs to 0s whenever there is no entries\n # for those labels among the targets\n stats['TPR'][stats['P'] == 0] = 0\n stats['PPV'] = stats['TP'] / (1.0*stats[\"P'\"])\n stats['NPV'] = stats['TN'] / (1.0*stats[\"N'\"])\n stats['FDR'] = stats['FP'] / (1.0*stats[\"P'\"])\n stats['SPC'] = (stats['TN']) / (1.0*stats['FP'] + stats['TN'])\n\n MCC_denom = N.sqrt(1.0*stats['P']*stats['N']*stats[\"P'\"]*stats[\"N'\"])\n nz = MCC_denom!=0.0\n stats['MCC'] = N.zeros(stats['TP'].shape)\n stats['MCC'][nz] = \\\n (stats['TP'] * stats['TN'] - stats['FP'] * stats['FN'])[nz] \\\n / MCC_denom[nz]\n\n stats['ACC'] = N.sum(TP)/(1.0*N.sum(stats['P']))\n stats['ACC%'] = stats['ACC'] * 100.0\n\n #\n # ROC computation if available\n ROC = ROCCurve(labels=labels, sets=self.sets)\n aucs = ROC.aucs\n if len(aucs)>0:\n 
stats['AUC'] = aucs\n if len(aucs) != Nlabels:\n raise RuntimeError, \\\n \"We must got a AUC per label. Got %d instead of %d\" % \\\n (len(aucs), Nlabels)\n self.ROC = ROC\n else:\n # we don't want to provide ROC if it is bogus\n stats['AUC'] = [N.nan] * Nlabels\n self.ROC = None\n\n\n # compute mean stats\n for k,v in stats.items():\n stats['mean(%s)' % k] = N.mean(v)\n\n self._stats.update(stats)",
"def update_distmatrix(self, i, j, clusters):\n return self.linkage(clusters, i, j, self._dendrogram)",
"def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))",
"def _update_distances(dist_matrix, node1, node2, new_cluster):\n # Initialize new distance matrix.\n node_label = pd.Index([str(new_cluster)])\n new_labels = dist_matrix.axes[0].drop([node1, node2]).append(node_label)\n new_dist_matrix = pd.DataFrame(np.nan, index=new_labels, columns=new_labels)\n \n # Fill in distance matrix\n # First copy over values that stay the same\n for row in new_dist_matrix.axes[0].drop(node_label):\n for col in new_dist_matrix.axes[1].drop([node_label[0], row]):\n new_dist_matrix.at[row, col] = dist_matrix.at[row, col]\n new_dist_matrix.at[col, row] = dist_matrix.at[row, col]\n \n # Distance from other OTU, k, to new node, i-j (wiki EQ 3):\n # d(i-j, k) = .5 * (dist(i, k) + dist(j, k) - dist(i, j))\n for k in new_dist_matrix.axes[1].drop(node_label):\n dist = .5 * (dist_matrix.at[k, node1]\n + dist_matrix.at[k, node2]\n - dist_matrix.at[node1, node2])\n new_dist_matrix.at[node_label, k] = dist\n new_dist_matrix.at[k, node_label] = dist\n \n # Return the distance matrix.\n return new_dist_matrix",
"def dist_labels(labels):\n if not labels.any():\n return labels\n return cv2.distanceTransform(labels,\n distanceType=cv2.DIST_L1,\n maskSize=3,\n dstType=cv2.CV_8U)",
"def fit(self, df):\n if not isinstance(df, np.ndarray):\n raise ValueError(\"Data must be ndarray type\")\n\n dist_matrix = pairwise_dist(df)\n labels = np.arange(df.shape[0])\n\n if self.method == \"naive\":\n self.linkage_matrix = linkage_naive(dist_matrix)\n else:\n self.linkage_matrix = mst_linkage(labels, dist_matrix)\n\n return self",
"def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))",
"def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)",
"def calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties, on_dev, data_folder):\n with open(os.path.join(data_folder,'label_map.json'), 'r') as j:\n label_map = json.load(j)\n \n rev_label_map = {v: k for k, v in label_map.items()} \n if on_dev:\n device = det_boxes[0].device # torch.device('cuda:1')\n \n # these are all lists of tensors of the same length, i.e. number of images\n assert len(det_boxes) == len(det_labels) == len(det_scores) == len(true_boxes) == len(true_labels) == len(true_difficulties) \n n_classes = len(label_map)\n \n # Store all (true) objects in a single continuous tensor while keeping track of the image it is from\n true_images = list()\n for i in range(len(true_labels)):\n true_images.extend([i] * true_labels[i].size(0))\n # ==============================================================================================================================\n if on_dev:\n true_images = torch.LongTensor(true_images).to(device)#-(n_objects), n_objects is the total no. of objects across all images\n else:\n true_images = torch.LongTensor(true_images).cuda()#-----(n_objects), n_objects is the total no. of objects across all images\n \n true_boxes = torch.cat(true_boxes, dim=0)#------------------(n_objects, 4)\n true_labels = torch.cat(true_labels, dim=0)#----------------(n_objects)\n true_difficulties = torch.cat(true_difficulties, dim=0)#----(n_objects)\n # ==============================================================================================================================\n assert true_images.size(0) == true_boxes.size(0) == true_labels.size(0)\n\n # Store all detections in a single continuous tensor while keeping track of the image it is from\n det_images = list()\n for i in range(len(det_labels)):\n det_images.extend([i] * det_labels[i].size(0))\n \n if on_dev:\n det_images = torch.LongTensor(det_images).to(device) # (n_detections)\n else:\n det_images = torch.LongTensor(det_images).cuda() # (n_detections)\n \n det_boxes = torch.cat(det_boxes, dim=0) # (n_detections, 4)\n det_labels = torch.cat(det_labels, dim=0) # (n_detections)\n det_scores = torch.cat(det_scores, dim=0) # (n_detections)\n\n assert det_images.size(0) == det_boxes.size(0) == det_labels.size(0) == det_scores.size(0)\n\n # Calculate APs for each class (except background)\n average_precisions = torch.zeros((n_classes - 1), dtype=torch.float) # (n_classes - 1)\n for c in range(1, n_classes):\n # Extract only objects with this class\n true_class_images = true_images[true_labels == c] # (n_class_objects)\n true_class_boxes = true_boxes[true_labels == c] # (n_class_objects, 4)\n true_class_difficulties = true_difficulties[true_labels == c] # (n_class_objects)\n n_easy_class_objects = (1 - true_class_difficulties).sum().item() # ignore difficult objects\n\n # Keep track of which true objects with this class have already been 'detected'\n # So far, none\n if on_dev:\n true_class_boxes_detected = torch.zeros((true_class_difficulties.size(0)), dtype=torch.uint8).to(device)#(n_class_objects)\n else:\n true_class_boxes_detected = torch.zeros((true_class_difficulties.size(0)), dtype=torch.uint8).cuda() # (n_class_objects)\n\n # Extract only detections with this class\n det_class_images = det_images[det_labels == c] # (n_class_detections)\n det_class_boxes = det_boxes[det_labels == c] # (n_class_detections, 4)\n det_class_scores = det_scores[det_labels == c] # (n_class_detections)\n n_class_detections = det_class_boxes.size(0)\n if n_class_detections == 0:\n continue\n\n # 
Sort detections in decreasing order of confidence/scores\n det_class_scores, sort_ind = torch.sort(det_class_scores, dim=0, descending=True) # (n_class_detections)\n det_class_images = det_class_images[sort_ind] # (n_class_detections)\n det_class_boxes = det_class_boxes[sort_ind] # (n_class_detections, 4)\n\n # In the order of decreasing scores, check if true or false positive\n if on_dev:\n true_positives = torch.zeros((n_class_detections), dtype=torch.float).to(device) # (n_class_detections)\n false_positives = torch.zeros((n_class_detections), dtype=torch.float).to(device) # (n_class_detections)\n else:\n true_positives = torch.zeros((n_class_detections), dtype=torch.float).cuda() # (n_class_detections)\n false_positives = torch.zeros((n_class_detections), dtype=torch.float).cuda() # (n_class_detections)\n \n for d in range(n_class_detections):\n this_detection_box = det_class_boxes[d].unsqueeze(0) # (1, 4)\n this_image = det_class_images[d] # (), scalar\n\n # Find objects in the same image with this class, their difficulties, and whether they have been detected before\n object_boxes = true_class_boxes[true_class_images == this_image] # (n_class_objects_in_img)\n object_difficulties = true_class_difficulties[true_class_images == this_image] # (n_class_objects_in_img)\n # If no such object in this image, then the detection is a false positive\n if object_boxes.size(0) == 0:\n false_positives[d] = 1\n continue\n\n # Find maximum overlap of this detection with objects in this image of this class\n overlaps = find_jaccard_overlap(this_detection_box, object_boxes) # (1, n_class_objects_in_img)\n max_overlap, ind = torch.max(overlaps.squeeze(0), dim=0) # (), () - scalars\n\n # 'ind' is the index of the object in these image-level tensors 'object_boxes', 'object_difficulties'\n # In the original class-level tensors 'true_class_boxes', etc., 'ind' corresponds to object with index...\n original_ind = torch.LongTensor(range(true_class_boxes.size(0)))[true_class_images == this_image][ind]\n # We need 'original_ind' to update 'true_class_boxes_detected'\n\n # If the maximum overlap is greater than the threshold of 0.5, it's a match\n if max_overlap.item() > 0.5:\n # If the object it matched with is 'difficult', ignore it\n if object_difficulties[ind] == 0:\n # If this object has already not been detected, it's a true positive\n if true_class_boxes_detected[original_ind] == 0:\n true_positives[d] = 1\n true_class_boxes_detected[original_ind] = 1 # this object has now been detected/accounted for\n # Otherwise, it's a false positive (since this object is already accounted for)\n else:\n false_positives[d] = 1\n # Otherwise, the detection occurs in a different location than the actual object, and is a false positive\n else:\n false_positives[d] = 1\n\n # Compute cumulative precision and recall at each detection in the order of decreasing scores\n cumul_true_positives = torch.cumsum(true_positives, dim=0) # (n_class_detections)\n cumul_false_positives = torch.cumsum(false_positives, dim=0) # (n_class_detections)\n cumul_precision = cumul_true_positives / (\n cumul_true_positives + cumul_false_positives + 1e-10) # (n_class_detections)\n cumul_recall = cumul_true_positives / n_easy_class_objects # (n_class_detections)\n\n # Find the mean of the maximum of the precisions corresponding to recalls above the threshold 't'\n recall_thresholds = torch.arange(start=0, end=1.1, step=.1).tolist() # (11)\n if on_dev:\n precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).to(device) # (11)\n else:\n 
precisions = torch.zeros((len(recall_thresholds)), dtype=torch.float).cuda()\n for i, t in enumerate(recall_thresholds):\n recalls_above_t = cumul_recall >= t\n if recalls_above_t.any():\n precisions[i] = cumul_precision[recalls_above_t].max()\n else:\n precisions[i] = 0.\n average_precisions[c - 1] = precisions.mean() # c is in [1, n_classes - 1]\n\n # Calculate Mean Average Precision (mAP)\n mean_average_precision = average_precisions.mean().item()\n\n # Keep class-wise average precisions in a dictionary\n average_precisions = {rev_label_map[c + 1]: v for c, v in enumerate(average_precisions.tolist())}\n\n return average_precisions, mean_average_precision"
] | [
"0.60576165",
"0.57919693",
"0.566674",
"0.5591969",
"0.5492387",
"0.5474446",
"0.54441214",
"0.54441214",
"0.53986883",
"0.5384593",
"0.536254",
"0.53395325",
"0.5278341",
"0.5267286",
"0.5205148",
"0.51953477",
"0.5181319",
"0.5104884",
"0.5094013",
"0.50924665",
"0.5090735",
"0.5073989",
"0.50617003",
"0.50410783",
"0.5011336",
"0.49517322",
"0.49478552",
"0.49417612",
"0.49036926",
"0.49008042"
] | 0.5923138 | 1 |
Interactively retrieves the credential for a user_id. client_id: user identifier; client_secret: user's secret key; persist: True to immediately store the credential, False otherwise (default) | def get_client_credentials_intractive(self, client_id, client_secret, persist=False):
if type(client_id) == unicode:
client_id = client_id.encode('ascii')
if type(client_secret) == unicode:
client_secret = client_secret.encode('ascii')
flow = OAuth2WebServerFlow(client_id, client_secret, self._OAUTH_SCOPE,
redirect_uri=self._REDIRECT_URI)
authorize_url = flow.step1_get_authorize_url()
print 'Go to the following link in your browser: ' + authorize_url
code = raw_input('Enter verification code: ').strip()
credentials = flow.step2_exchange(code)
if persist:
self.store_client_credentials(client_id, credentials)
return credentials | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials",
"def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def user_client(user):\n client = Client()\n client.force_login(user)\n return client",
"def create_client():\n result = False\n if g.client_id in drivers:\n result = True\n return jsonify({'Success': result})",
"def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials",
"def get_credentials() -> client.Credentials:\n\n credential_path = os.path.join(HOME_DIR, \"google-credentials.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(os.path.join(HOME_DIR, CLIENT_SECRET_FILE), SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n # This attempts to open an authorization page in the default web browser, and asks the user\n # to grant the bot access to their data. If the user grants permission, the run_flow()\n # function returns new credentials.\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print(\"Storing credentials to \" + credential_path)",
"def create_client(name):\n client = Client(name=name)\n print(client.client_secret)\n db.session.add(client)\n db.session.commit()\n return client",
"def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials",
"def use_cred():\n prompt = \"Use Credentials? (N for Anonymous)\"\n return query_yes_no(question=prompt, default=\"no\")",
"def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def ft_credential_server():\n ensure_credential_server_running( run_once=True, foreground=True )",
"def credential_get(uniqueID: str):\n\n cert = safeisland.certificate(uniqueID)\n return {\"payload\": cert}",
"def get_stored_credentials(user_id):\n #\n # To instantiate an OAuth2Credentials instance from a Json\n # representation, use the oauth2client.client.Credentials.new_from_json\n # class method.\n user = engine.query(User).filter(userId=user_id).first()\n if user:\n user_dict = user.__dict__\n if user_dict['credentials']:\n # credentials = Credentials.new_from_json(user['credentials'])\n credentials = json.loads(user_dict['credentials'])\n token_expiry = credentials['token_expiry']\n dexp = parser.parse(str(token_expiry))\n dexp = dexp.replace(tzinfo=None)\n dnow = datetime.now()\n\n if dexp > dnow:\n return Credentials.new_from_json(user_dict['credentials'])\n else:\n status_code, data = renew_access_token(client_id=credentials['client_id'],\n client_secret=credentials['client_secret'],\n refresh_token=credentials['refresh_token'],\n )\n if status_code == INT_OK:\n credentials['access_token'] = data['access_token']\n credentials['token_expiry'] = datetime_util(datetime.now() + timedelta(seconds=float(str(data['expires_in']))))\n credentials = Credentials.new_from_json(json_encode(credentials))\n user.update_credentials(credentials.to_json())\n user.sync()\n return credentials\n else:\n return None\n else:\n return None\n return None",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials(servise: str) -> google.oauth2.credentials.Credentials:\n\n # SQL query to get the credentials for the current user from servise credentials table\n query = f\"\"\"\n SELECT token, token_uri, client_id, refresh_token, client_secret, scopes\n FROM {servise}_credentials\n WHERE user_id=?;\n \"\"\"\n\n # Get the credentials\n with connect(DATABASE) as db:\n credentials = db.execute(query, (session[\"user_id\"],)).fetchone()\n\n # Return None if it doesn't exist it the database\n if not credentials: return None\n\n # Transfer the credentials to a dictionary\n credentials_dict = {\n \"token\": credentials[0],\n \"token_uri\": credentials[1],\n \"client_id\": credentials[2],\n \"refresh_token\": credentials[3],\n \"client_secret\": credentials[4],\n \"scopes\": None if credentials[5] is None else credentials[5].split(\" \")\n }\n\n # Return a google Credentials object\n return google.oauth2.credentials.Credentials(**credentials_dict)",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def _get_credentials(flags):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-visualizerhelptext.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def auth_by_guid(self):\n self.console.debug(\"Auth by guid: %r\", self.guid)\n try:\n return self.console.storage.getClient(self)\n except KeyError, msg:\n self.console.debug('User not found %s: %s', self.guid, msg)\n return False",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def test_create_application_credential(self):\n app_cred = self.create_application_credential()\n\n # Check that the secret appears in the create response\n secret = app_cred['secret']\n\n # Check that the secret is not retrievable after initial create\n app_cred = self.non_admin_app_creds_client.show_application_credential(\n user_id=self.user_id,\n application_credential_id=app_cred['id']\n )['application_credential']\n self.assertNotIn('secret', app_cred)\n\n # Check that the application credential is functional\n _, resp = self.non_admin_token.get_token(\n app_cred_id=app_cred['id'],\n app_cred_secret=secret,\n auth_data=True\n )\n self.assertEqual(resp['project']['id'], self.project_id)",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n scope = ['https://www.googleapis.com/auth/adsense.readonly',\n 'https://www.googleapis.com/auth/analytics.readonly']\n\n #get your client secret file\n cwd = os.getcwd()\n pathToFile = os.path.join(cwd,\n 'YOURCLIENTSECRETPATH.json')\n print \"This is your client secret path:\",pathToFile\n\n #first part of the folow process\n #https://developers.google.com/api-client-library/python/guide/aaa_oauth\n flow = oauth2client.client.flow_from_clientsecrets(pathToFile,scope,redirect_uri='urn:ietf:wg:oauth:2.0:oob')#'urn:ietf:wg:oauth:2.0:oob'\n \n #check to see if you have something already\n storage = oauth2client.file.Storage('creds.dat') #this is a made up file name\n credentials = storage.get()\n \n #if they dont exist already go ahead and get them\n if not credentials or credentials.invalid:\n #get authorization url\n auth_url = flow.step1_get_authorize_url()\n #open the url to get a code\n webbrowser.open(auth_url)\n\n #enter the code to reauth\n codeStr = str(raw_input('enter code here:'))\n credentials = flow.step2_exchange(codeStr)\n #save the code to the dat\n storage = oauth2client.file.Storage('creds.dat')\n storage.put(credentials)\n \n return credentials\n\n else:\n return credentials",
"def get_session(cred_file=\"mystic_creds.json\"):\n scope = ['https://spreadsheets.google.com/feeds',\n 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(cred_file,\n scope)\n #print(credentials)\n gc = gspread.authorize(credentials)\n return gc",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials"
] | [
"0.577868",
"0.57673126",
"0.5747401",
"0.5735007",
"0.5692768",
"0.5685396",
"0.5674494",
"0.56565887",
"0.5637241",
"0.5617159",
"0.5594525",
"0.55890554",
"0.5583921",
"0.55628043",
"0.5555722",
"0.554597",
"0.55318135",
"0.552191",
"0.552191",
"0.55188334",
"0.55112743",
"0.5504087",
"0.54698825",
"0.54657775",
"0.54657775",
"0.54656106",
"0.5455795",
"0.5450166",
"0.5429775",
"0.5414853"
] | 0.62820846 | 0 |
Remove the locally stored credentials | def remove_client_credentials(self):
if self._dry_run:
return
os.unlink(self._store_pathname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_credential(credentials):\n credentials.delete_credentials()",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credentials_list.remove(self)",
"def delete_credentials(self):\n Credentials.credential_list.remove(self)",
"def remove_credentials(self, conjurrc: ConjurrcData):\n self.credentials_provider.remove_credentials(conjurrc)",
"def cleanup_credentials(self, conjurrc: ConjurrcData):\n self.credentials_provider.cleanup_if_exists(conjurrc.conjur_url)",
"def delete_credential(self):\n Credentials.credentials_list.remove(self)",
"def remove_credentials(service: str) -> None:\n\n # SQL query to remove the user servise credentials from the database\n query = f\"DELETE FROM {service}_credentials WHERE user_id=?;\"\n\n # Execute the query\n with connect(DATABASE) as db:\n db.execute(query, (session[\"user_id\"],))\n db.commit()",
"def unset_credentials(ctx, user, store):\n try:\n logger.debug(\"store={store}, user={user}\".format(store=store, user=user))\n _pycred.unset_credentials(store, user)\n except Exception as e:\n logger.debug(e, exc_info=True)\n print('Error: {msg}'.format(msg=str(e)), file=sys.stderr)\n sys.exit(1)",
"def tearDown(self):\n Credentials.cred_list = []",
"def tearDown(self):\n Credentials.credentials_list = []",
"def tearDown(self):\n Credentials.credentials_list = []",
"def tearDown(self):\n Credentials.credential_list = []",
"def logout(self):\n self._client.clear_credentials()",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def destroy(self):\n\t\tos.remove(self.account_file)",
"def reset_credentials(self):\n credentials = {}\n with open(self.credentials_file, 'w') as fh_credentials:\n fh_credentials.write(json.dumps(credentials))",
"def cleanup(name, client=None):\n credential_specs_path = _get_path(client)\n path = os.path.join(credential_specs_path, name + '.json')\n fs.rm_safe(path)",
"def delete_credential(self):\n Credential.credential_list.remove(self)",
"def revoke(self):\n # Removes credentialing from the user\n with transaction.atomic():\n self.revoked_datetime = timezone.now()\n\n self.migrated_user.is_credentialed = False\n self.migrated_user.credential_datetime = None\n\n self.migrated_user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.migrated_user.email))",
"def userdel(pwfile, user):\n return __salt__[\"webutil.userdel\"](pwfile, user)",
"def cleanup(self,context,result):\n if self.do_cleanup:\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-delete\", self.user_name],[])\n except:\n result.note_exception(cause=\"Resource cleanup: Can't remove user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result, Result.ERROR,'GSEC','Delete user',\n stdout,stderr)",
"def delete_credential(self):\n\n Credential.credential_list.remove(self)",
"def clear_datastore():\n local('lib/remote_api_shell.py tweetlocker -p /_/shell -c '\n '\"from lib.utils import clear_datastore; clear_datastore()\"',\n capture=False)",
"def remove_all_credentials(self, authenticator_id):\n pass",
"def test_delete_creds(self):\n self.new_credentials.save_creds()\n self.new_credentials.delete_creds()\n\n self.assertEqual(len(Credentials.credential_list),0)",
"def remove_user(self):\n self.currentuser = None\n self.carlocked = False",
"def revoke(self):\n # Set the application as unsucessful with the current datetime\n self.status = self.Status.REVOKED\n self.revoked_datetime = timezone.now()\n\n # Removes credentialing from the user\n self.user.is_credentialed = False\n self.user.credential_datetime = None\n\n with transaction.atomic():\n self.user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.user.email))",
"def tearDown(self):\n Credential.credential_list = []"
] | [
"0.7707641",
"0.75258327",
"0.75258327",
"0.75258327",
"0.74404943",
"0.7261854",
"0.70868516",
"0.7018398",
"0.6845732",
"0.67399734",
"0.67137116",
"0.6710589",
"0.6710589",
"0.6691028",
"0.65997237",
"0.65893865",
"0.6577486",
"0.6569618",
"0.6546576",
"0.6540298",
"0.6538002",
"0.64331555",
"0.63910246",
"0.63592005",
"0.63511455",
"0.6337634",
"0.6319224",
"0.62702817",
"0.6240253",
"0.6226302"
] | 0.8074535 | 0 |
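For illustration, the dry-run guard in remove_client_credentials above can be exercised against a throwaway file; the path below is made up, and the final check simply confirms nothing is deleted while the flag is set:

import os
import tempfile

# Create a throwaway stand-in for the credential store file.
store_pathname = os.path.join(tempfile.mkdtemp(), "credstore")
open(store_pathname, "w").close()

dry_run = True
if not dry_run:
    os.unlink(store_pathname)

# With dry_run enabled, the file is left untouched.
assert os.path.exists(store_pathname)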
Returns the credential store if the file exists | def _load_credential_store(self):
try:
return shelve.open(self._store_pathname)
except Exception:
raise CredentialError('Unable to open credential store: ' + self._store_pathname) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_creds_file(self):\n filename = self.filename\n\n home = str(Path.home())\n filepath = home + os.sep + filename\n self.path = filepath\n if not os.path.isfile(filepath):\n return False\n\n j = json.load(open(filepath))\n self.keys = j\n return j",
"def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'google-photos-stats.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, flags)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials",
"def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n\thome_dir = os.path.expanduser('~')\n\tcredential_dir = os.path.join(home_dir, '.credentials')\n\tif not os.path.exists(credential_dir):\n\t\tos.makedirs(credential_dir)\n\tcredential_path = os.path.join(credential_dir, \n\t\t\t\t\t\t\t\t\t'facebook_updater.json')\n\t\t\t\t\t\t\t\t\t\n\tstore = oauth2client.file.Storage(credential_path)\n\tcredentials = store.get()\n\tif not credentials or credentials.invalid:\n\t\tflow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n\t\tflow.user_agent = APPLICATION_NAME\n\t\tif flags:\n\t\t\tcredentials = tools.run_flow(flow, store, flags)\n\t\tprint ('Storing credentials to ' + credential_path)\n\treturn credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'admin-directory_v1-NestedGroupSync.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print 'Storing credentials to' + credential_path\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'client_secret_OCR.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n print(\"Current folder: \" + os.getcwd())\n flow = client.flow_from_clientsecrets(\n \"../../\" + CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get(self):\n self._lock.acquire()\n try:\n f = open(self._filename, 'r')\n credentials = pickle.loads(f.read())\n f.close()\n credentials.set_store(self.put)\n except:\n credentials = None\n self._lock.release()\n\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'thejam_calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n credential_dir = os.getcwd()\n credential_path = os.path.join(credential_dir,\n 'smarking_error_check.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'fb-drive.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def check_for_credential_file(self):\r\n if 'AWS_CREDENTIAL_FILE' in os.environ:\r\n path = os.environ['AWS_CREDENTIAL_FILE']\r\n path = os.path.expanduser(path)\r\n path = os.path.expandvars(path)\r\n if os.path.isfile(path):\r\n fp = open(path)\r\n lines = fp.readlines()\r\n fp.close()\r\n for line in lines:\r\n if line[0] != '#':\r\n if '=' in line:\r\n name, value = line.split('=', 1)\r\n if name.strip() == 'AWSAccessKeyId':\r\n if 'aws_access_key_id' not in self.args:\r\n value = value.strip()\r\n self.args['aws_access_key_id'] = value\r\n elif name.strip() == 'AWSSecretKey':\r\n if 'aws_secret_access_key' not in self.args:\r\n value = value.strip()\r\n self.args['aws_secret_access_key'] = value\r\n else:\r\n print 'Warning: unable to read AWS_CREDENTIAL_FILE'",
"def get_credentials():\n credential_dir = os.path.realpath('.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path) # stores the users credentials --> TODO: put in database\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n\n credentials = tools.run_flow(flow, store, flags)\n\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'gmail-python-spam-filter.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_creds(profile, conf):\n if profile:\n store = file.Storage(conf.get_profile(profile))\n return store.get()\n elif len(conf.list_profiles()) > 0:\n store = file.Storage(conf.get_profile(conf.list_profiles()[0]))\n return store.get()\n else:\n return None",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials"
] | [
"0.70948315",
"0.70730686",
"0.70023566",
"0.6916191",
"0.6912367",
"0.69121444",
"0.689295",
"0.6887308",
"0.6858863",
"0.68382764",
"0.68215716",
"0.6797262",
"0.6796208",
"0.6744389",
"0.67271996",
"0.6713757",
"0.67029476",
"0.6693516",
"0.6684445",
"0.6670447",
"0.66593444",
"0.66559684",
"0.66557944",
"0.6639159",
"0.6606256",
"0.66017026",
"0.6597159",
"0.6588707",
"0.6588707",
"0.6588707"
] | 0.73352385 | 0 |
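As context for _load_credential_store above, shelve.open returns a persistent dict-like object keyed by strings; a minimal standalone sketch of the same pattern (the path and key name are invented for illustration):

import os
import shelve
import tempfile

store_pathname = os.path.join(tempfile.mkdtemp(), "credentials")

store = shelve.open(store_pathname)
try:
    store["access_token"] = "dummy-token"    # persist a credential-like value
    token = store.get("access_token")        # dict-style read; None if absent
finally:
    store.close()                            # flush and close the shelf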
Flushes and closes the credential store | def _save_credential_store(self, store):
store.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def close(self):\n self.save()\n # self.fileKey = None\n if self.openAccount:\n self.openAccount.close()\n self.openAccount = None",
"def close(self):\n self.password = None\n self.session.close()",
"async def aclose(self) -> None:\n\t\tawait self._stores_cleanup()",
"def close(self):\n if self.authenticated:\n self.db.logout()\n if self.connection is not None:\n self.connection.close()",
"def close(self):\n self._flush()\n self.database.close()",
"def disconnect(self):\n self._save_database()\n self.dbh.close()\n self._remove_temporary_file()",
"def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n try:\n self._store.close()\n except AttributeError:\n pass",
"def manual_exit(self):\n self._dbconnect.commit()\n self._dbconnect.close()\n self._dbconnect = None\n self._cursor = None",
"def close(self):\n self.db.commit()\n self.db.close()",
"def storage_close(self):\n storage.close()",
"def __del__(self):\r\n self.save()\r\n self.close()",
"def close(self):\n self._close()\n self._closed = True\n self._auth_error = self._AUTH_ERROR_MESSAGES[0]\n self.login_info = None",
"def disconnect(self):\n self.db.close()",
"def tear(exc):\n storage.close()",
"def close(self):\n self._store.close()",
"def perform_teardown():\n global credentials, connection, channel\n connection.close()",
"def exit(self):\n if self._user != None:\n self._user.logged_in = False\n \n with open('users_db.pkl', 'wb') as file:\n for user in self.auth.users:\n pickle.dump(user, file, pickle.HIGHEST_PROTOCOL)\n print(\"Thank you for using Mini-WhatsApp :D\")\n print(\"Closing .....\")\n sys.exit()",
"async def close(self):\n print('Close {}'.format(str(self.__hash__)))\n await self.db.close()",
"def close_login(self):\n self.login.destroy()",
"async def close(self) -> None:\n\n # for conn_handle in self._conn_handles:\n # await agent.agent_close_connection(conn_handle)\n # self._conn_handles.clear()\n await wallet.close_wallet(self.wallet_handle)",
"def close(self):\r\n _logger.debug(\"Closing sessions...\")\r\n dbs = self._sessions.keys()\r\n while len(dbs) > 0:\r\n session = self._sessions.pop(dbs.pop())\r\n session.close()\r\n if self.__provider is not None:\r\n self.__provider.close()\r\n self.__provider = None",
"def flush_account(self):\n if self.data_channel:\n if not self.data_channel.transfer_in_progress():\n self.data_channel.close()\n self.data_channel = None\n if self.data_server:\n self.data_server.close()\n self.data_server = None\n\n self.fs.rnfr = None\n self.authenticated = False\n self.username = \"\"\n self.attempted_logins = 0\n self.current_type = 'a'\n self.restart_position = 0\n self.quit_pending = False\n self.in_dtp_queue = None\n self.out_dtp_queue = None\n\n\n # --- connection",
"def __exit__(self, type, value, traceback):\n self.save_resource_statuses()\n self.save_user_metadata()\n if self._session:\n self._session.close()\n self.api.close()\n # TODO: restore from local backup of file in case we can't upload and have to roll back",
"def cleanup(self):\r\n # XXX should be fixed properly!!!\r\n try:\r\n self.unlock()\r\n except:\r\n pass",
"def finish(self):\n self._resource_storage.close()",
"def close(db):\n storage.close()",
"def close(self):\n self.connection.commit()\n self.cursor.close()\n self.connected = False",
"def close(self):\n for k, v in six.iteritems(self.old_values):\n if v is None:\n self.server.deleteUserData(k)\n else:\n self.server.setUserData(k, v)\n self.old_values.clear()",
"def teardown_db(exception):\n storage.close()",
"def teardown_db(exception):\n storage.close()"
] | [
"0.6961587",
"0.6681481",
"0.65697914",
"0.6494412",
"0.6471906",
"0.64025295",
"0.6352745",
"0.63433146",
"0.6317204",
"0.6268953",
"0.62491477",
"0.6214849",
"0.62142926",
"0.62142706",
"0.6199774",
"0.61757946",
"0.61647165",
"0.61625445",
"0.614162",
"0.6118002",
"0.61150354",
"0.61041737",
"0.6094607",
"0.6089279",
"0.60775703",
"0.6077446",
"0.60722965",
"0.6070996",
"0.60660094",
"0.60660094"
] | 0.76500064 | 0 |
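Taken together, _load_credential_store and _save_credential_store above pair naturally in a try/finally; a hedged sketch of how such a helper might look on the same class (_update_credential is a hypothetical name, not part of the original code):

def _update_credential(self, key, value):
    # Open the shelf, write one entry, and always flush/close it again.
    store = self._load_credential_store()
    try:
        store[key] = value
    finally:
        self._save_credential_store(store)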
Extract location from FX node stack trace. | def _location_from_fx_stack_trace(
node_stack_trace: str,
) -> Optional[diagnostics.infra.Location]:
if "File" not in node_stack_trace:
return None
lines = node_stack_trace.strip().split("\n")
idx = 0
while idx < len(lines) and "File" not in lines[idx]:
idx += 1
if idx + 1 >= len(lines):
return None
pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$")
matches = pattern.match(lines[idx].strip())
if matches:
uri = matches.group(1)
line_number = int(matches.group(2))
snippet = lines[idx + 1].strip()
return diagnostics.infra.Location(uri=uri, line=line_number, snippet=snippet)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_node_loc(node):\n lineno = node.lineno\n end_lineno = get_last_deep_child(node).lineno\n return end_lineno - lineno",
"def frame_location_info(self):\n\n return str(self.active_frame.f_code.co_filename) + \":\" + str(self.active_frame.f_lineno)",
"def getStackPosition(self):\r\n return self.callstack.getStack()",
"def get_function_loc(self):\n return Gumtree.gumtree.getFunctionLoc()",
"def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )",
"def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror",
"def _sourceFrame(self):\n try:\n raise Exception('catch me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry",
"def _get_xblock_loc(self):\n return str(self.location).split('@')[-1]",
"def location(self, obj):\n if obj.node.location:\n return obj.node.location\n return None",
"def find_traceback_start(self):\n ### FILL IN ###",
"def trace():\n import traceback\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, __file__, synerror",
"def print_callsite_location():\n fi = inspect.getouterframes( inspect.currentframe() )[2]\n print(\"{path}:{line} {fname}\".format(\n line=fi.lineno, path=fi.filename, fname=fi.function))",
"def stacktrace(self):\n stacktrace = self.StacktraceParser().Parse(\n self._raw_stacktrace,\n self._dependency_analyzer.regression_version_deps,\n signature=self.signature, top_n_frames=self._top_n_frames)\n if not stacktrace:\n logging.warning('Failed to parse the stacktrace %s',\n self._raw_stacktrace)\n return stacktrace",
"def trace_stack_top(trace_stack_var: ContextVar) -> Any | None:\n trace_stack = trace_stack_var.get()\n return trace_stack[-1] if trace_stack else None",
"def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)",
"def extract_function_name(maybe_function_str: str) -> Optional[str]:\n match = STACK_TRACE_LINE_RE.search(maybe_function_str)\n if match is not None:\n return match.group(2)\n return None",
"def get_linenumber():\n\n # inspect.stack()[0][2] returns line number in this function\n lineno = str(inspect.stack()[1][2])\n\n return lineno",
"def get_cur_info():\n try:\n raise Exception\n except:\n f = sys.exc_info()[2].tb_frame.f_back\n # return (f.f_code.co_name, f.f_lineno)\n return f.f_code.co_name",
"def get_line(cls, frame, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\n\t\tif not code: \n\t\t\treturn ''\n\t\t\n\t\treturn code.splitlines()[frame.f_lineno]",
"def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s",
"def position(self):\n return self.stack.position()",
"def GetFunctionName():\n return traceback.extract_stack(None, 2)[0][2]",
"def getTraceback(self):\n self.mostLikelyPath = zeros((1, self.T+2))\n\n self.mostLikelyPath[0,0] = 0\n self.mostLikelyPath[0,-1] = self.noOfEmmittingStates+1\n\n for s in range(self.T, 0, -1):\n self.mostLikelyPath[0,s] = self.traceback[self.mostLikelyPath[0,s+1]-1, s]",
"def _findCaller(stack_info=False):\n f = logging.currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\", None\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == logging._srcfile:\n f = f.f_back\n continue\n sinfo = None\n if stack_info:\n sio = io.StringIO()\n sio.write('Stack (most recent call last):\\n')\n traceback.print_stack(f, file=sio)\n sinfo = sio.getvalue()\n if sinfo[-1] == '\\n':\n sinfo = sinfo[:-1]\n sio.close()\n rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)\n break\n return rv",
"def lineno():\n return str(' - IpAddr - line number: '+str(inspect.currentframe().f_back.f_lineno))",
"async def location(self):\n if not hasattr(self, \"_location\"):\n self._location = await Stack.fetch_stack_value(self, \"http://usefulinc.com/ns/doap#location\", await self.uuid)\n return self._location",
"def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n return caller",
"def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"",
"def _extract_thread_stack_trace(\n self, thread: str, lines: List[str]\n ) -> Optional[List[str]]:\n thread_str = f\"Thread {thread} \"\n i: int = 0\n while i < len(lines) and thread_str not in lines[i]:\n i += 1\n if i != len(lines) and thread_str in lines[i]:\n j: int = i\n while j < len(lines) and lines[j] != \"\\n\":\n j += 1\n start = i - 1\n end = j\n return lines[start:end]\n return None",
"def read_stack_pointer(self):\n return self.STACK_POINTER"
] | [
"0.64482874",
"0.6389762",
"0.63267493",
"0.62837934",
"0.61853236",
"0.61039037",
"0.6071818",
"0.6055752",
"0.60536265",
"0.60212994",
"0.6014216",
"0.5930068",
"0.58694696",
"0.57997334",
"0.5782252",
"0.5774222",
"0.5765356",
"0.57514143",
"0.57455015",
"0.57362705",
"0.5728517",
"0.5722218",
"0.5710176",
"0.5709032",
"0.57060003",
"0.5701151",
"0.5665797",
"0.5658573",
"0.56384474",
"0.561581"
] | 0.79784185 | 0 |
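For reference, the regular expression used in _location_from_fx_stack_trace above can be checked against a made-up trace line (the file name, line number, and function name are invented):

import re

pattern = re.compile(r"^File \"(.+)\", line (\d+), in (.+)$")
sample = 'File "model.py", line 12, in forward'

matches = pattern.match(sample)
if matches:
    uri = matches.group(1)                # "model.py"
    line_number = int(matches.group(2))   # 12
    function = matches.group(3)           # "forward"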
Map an FX value to a TorchScript value. When creating a TorchScript graph from an FX graph, we need a mapping from FX variables to TorchScript variables. This function maps the FX variable fx_node_arg to a torch.jit.Value. | def _retrieve_or_adapt_input_to_graph_set(
fx_node_arg: fx_type_utils.Argument,
fx_name_to_onnxscript_value: Dict[
str,
Union[
onnxscript_graph_building.TorchScriptTensor,
Tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
],
tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
):
onnx_tensor = fx_node_arg
if isinstance(onnx_tensor, torch.fx.Node):
# 1. fx_node_arg is a torch.fx.Node, which means
# fx_node_arg stands for the output of that torch.fx.Node.
# 2. fx_node_arg (variable in torch.fx.Graph) is mapped to
# torch.jit.Value, fx_name_to_onnxscript_value[fx_node_arg.name],
# in TorchScript graph.
return fx_name_to_onnxscript_value[onnx_tensor.name]
if isinstance(onnx_tensor, (tuple, list)) and any(
isinstance(node, torch.fx.Node)
and isinstance(node.meta.get("val"), torch.SymInt)
for node in onnx_tensor
):
# This intends to handle dynamic axes. For example, if the input size of op.Expand
# is dynamic, each dimension would be a variable (i.e., a sym variable in the PyTorch
# FX graph; note that sym variables are mapped to tensors in the ONNX Script world).
# calculated by other operators.
sequence_mixed_elements: List[
Union[
onnxscript_graph_building.TorchScriptTensor,
List[int],
]
] = []
for tensor in onnx_tensor:
if isinstance(tensor, torch.fx.Node) and isinstance(
tensor.meta.get("val"), torch.SymInt
):
sequence_mixed_elements.append(fx_name_to_onnxscript_value[tensor.name])
elif isinstance(tensor, int):
# NOTE: op.Concat doesn't support scalar, so we need to wrap it with
# dim, and onnx-script will promote it to tensor(int64)
sequence_mixed_elements.append([tensor])
# Concat all the elements in the sequence.
# shapes are mapped to tensors in ONNX graph (TorchScriptGraph),
# so list of sym_ints is concatenated to a tensor before calling ONNX op.
# For example:
# inputs: [[2], [4], fx.Node(SymIntA), [1], fx.Node(SymIntB)]
# outputs: op.Concat([op.Constant(2), op.Constant(4), TorchScriptTensor(A), op.Constant(1), TorchScriptTensor(B)])
# onnx-script auto-wraps Python numbers with op.Constant,
# so we don't need to specifically process them.
with onnxscript.evaluator.default_as(tracer):
output = onnxscript.opset18.Concat(*sequence_mixed_elements, axis=0)
output.dtype = torch.int64
output.shape = [len(sequence_mixed_elements)]
return output
elif isinstance(onnx_tensor, (tuple, list)) and all(
isinstance(node, torch.fx.Node) or node is None for node in onnx_tensor
):
sequence_elements: List[
Union[
Optional[onnxscript_graph_building.TorchScriptTensor],
Tuple[
onnxscript_graph_building.TorchScriptTensor,
...,
],
]
] = []
for tensor in onnx_tensor:
sequence_elements.append(
fx_name_to_onnxscript_value[tensor.name] if tensor is not None else None
)
return sequence_elements
if isinstance(onnx_tensor, torch.dtype):
onnx_tensor = int(
jit_type_utils.JitScalarType.from_dtype(onnx_tensor).onnx_type()
)
# NOTE: if device is specified in kwargs (not consumed), it's free to be ignored. But
# if it's in args, we need to set it to a string for the dispatcher to match the schema.
if isinstance(onnx_tensor, torch.device):
# torch.device is not supported by onnxscript (no op). We turn it into
# a string.
return str(onnx_tensor)
# all other cases, we do nothing.
return onnx_tensor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call_module(\n self,\n node: torch.fx.Node,\n parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n root_fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n ) -> None:\n assert isinstance(\n node.target, str\n ), f\"node.target must be a str, not {type(node.target)} for node {node}.\"\n\n sub_module = root_fx_graph_module.get_submodule(node.target)\n\n assert isinstance(\n sub_module, torch.fx.GraphModule\n ), f\"sub_module must be a torch.fx.GraphModule, not {type(sub_module)} for node {node}.\"\n\n sub_onnxscript_graph = self.run(\n sub_module, onnxfunction_dispatcher, op_level_debug, parent_onnxscript_graph\n )\n\n onnx_args, _ = _wrap_fx_args_as_onnxscript_args(\n list(node.args), {}, fx_name_to_onnxscript_value, tracer\n )\n\n # TODO: We may want to consider other naming styles. The goal is to be stable and\n # unique such that it can be easily identified in case of kernel substitution.\n # Example for current style is combination of qualified module class name and\n # module attribute name: `torch_nn_modules_conv_Conv2d_conv1`.\n # Other naming styles such as qualified module class name made unique can also\n # be considered.\n unique_module_name = f\"{sub_module._get_name()}_{node.target}\"\n\n outputs: Union[ # type: ignore[no-redef]\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ] = parent_onnxscript_graph.add_module_call(\n unique_module_name, sub_onnxscript_graph, onnx_args\n )\n\n assert isinstance(\n outputs, (onnxscript_graph_building.TorchScriptTensor, tuple)\n ), f\"Unexpected outputs type {type(outputs)} for node {node}.\"\n\n _fill_tensor_shape_type(outputs, node.name, node.meta[\"val\"])\n fx_name_to_onnxscript_value[node.name] = outputs\n\n # Skip op_level_validation for call_module. Subgraph nodes are validated individually.",
"def run(\n self,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n parent_onnxscript_graph: Optional[\n onnxscript_graph_building.TorchScriptGraph\n ] = None,\n ) -> onnxscript_graph_building.TorchScriptGraph:\n onnxscript_graph = onnxscript_graph_building.TorchScriptGraph(\n parent_onnxscript_graph\n )\n onnxscript_tracer = onnxscript_graph_building.TorchScriptTracingEvaluator(\n onnxscript_graph\n )\n # In the following loop, a TorchScript graph is created to\n # represent the input FX graph with ONNX symbols (e.g., onnx::add).\n # To connect the values to nodes in the TorchScript graph, we maintian\n # fx_name_to_onnxscript_value. Basically, we want to translate\n # fx_tensor_x (type: torch.fx.Node) -> fx_node_1 -> fx_tensor_y (type: torch.fx.Node)\n # to\n # fx_name_to_onnxscript_value[fx_tensor_x.name] -> onnx_node_1 -> fx_name_to_onnxscript_value[fx_tensor_y.name]\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ] = {}\n\n # TODO: Fix FakeTensorMode limitation asap\n # We want to pass list of ints and floats to TorchScript graph correctly\n # in _export_fx_to_ts, so we must disable FakeTensorMode. Otherwise, graph may\n # receive FakeTensor and results runtime error. In addition, TorchScript-based\n # ONNX exporter used in _ts_graph_to_onnx_model_in_protobuf is not compatible\n # with FakeTensorMode.\n with torch.utils._mode_utils.no_dispatch():\n # node_fixed_shape is only used on op_level_debug purpose.\n for node in fx_graph_module.graph.nodes:\n self.run_node(\n node,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n onnxscript_graph,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n )\n\n return onnxscript_graph",
"def run_node(\n self,\n node,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n ):\n # Record stack trace of node in diagnostic.\n node_stack_trace = node.stack_trace\n if node_stack_trace:\n diagnostic = self.diagnostic_context.inflight_diagnostic(\n rule=diagnostics.rules.fx_node_to_onnx\n )\n diagnostic.with_additional_message(\n f\"### PyTorch source information\\n```\\n{node_stack_trace}\\n```\"\n )\n location = _location_from_fx_stack_trace(node_stack_trace)\n if location is not None:\n diagnostic.with_location(location)\n\n if node.op == \"placeholder\":\n self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)\n elif node.op == \"get_attr\":\n self.get_attr(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n fx_graph_module,\n )\n elif node.op == \"call_function\":\n self.call_function(\n node,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n onnxfunction_dispatcher,\n op_level_debug,\n fx_graph_module,\n )\n elif node.op == \"call_method\":\n self.call_method(node)\n elif node.op == \"call_module\":\n self.call_module(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n onnxscript_tracer,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n )\n elif node.op == \"output\":\n self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)\n else:\n raise RuntimeError(f\"Found node type not defined in torch.fx: {node.op}\")",
"def map_value(self) -> global___Expression.MapValue:",
"def convert(value):\n if isinstance(value, (Function, NodeBase)):\n return value\n\n if callable(value):\n return _convert_tvm_func(value)\n\n return _convert_to_node(value)",
"def set_node_value(node: Node, value: np.ndarray):\n if node.type != 'Const':\n raise Exception('Can\\'t set value for non-constant node {}'.format(node.name))\n data_type = np.float32\n if node.out_port(0).is_data_type_defined():\n data_type = node.out_port(0).get_data_type()\n node.out_port(0).data.set_value(np.array(value).astype(data_type))",
"def to_jit(self, *values, **kwargs):\n constraints = {f\"x{i}\": v.tensor_type_dims\n for i, v in enumerate(values)}\n if self.output_types is not None:\n constraints.update(self.output_types)\n inputs = [Input(f\"x{i}\") for i in range(len(values))]\n var = self.f(*inputs, **kwargs)\n onx = var.to_onnx(constraints=constraints,\n target_opsets=self.target_opsets,\n ir_version=self.ir_version)\n names = [f\"x{i}\" for i in range(len(values))]\n exe = self.tensor_class.create_function(names, onx)\n return onx, exe",
"def do_decode(self, value, decode_fn):\n del decode_fn\n tensor_proto = value.tensor_value\n tensor = constant(tensor_util.MakeNdarray(tensor_proto))\n return tensor",
"def compute_value(callback, graph):\n return callback(graph)",
"def update_action_value(self, state, action, value):\n self.value_function[to_table_index(state, action)] = value",
"def value(self, x: Any):",
"def forward(self, obs_variable, actions_variable):\n state_action = torch.cat((obs_variable, actions_variable), 1)\n q_val_variable = self.model(state_action)\n\n return q_val_variable",
"def assign_from_values_fn(var_names_to_values):\n assign_op, feed_dict = assign_from_values(var_names_to_values)\n def callback(session):\n return session.run(assign_op, feed_dict)\n return callback",
"def mapComponentValues(*args):\n return _libsbml.SBMLTransforms_mapComponentValues(*args)",
"def node_value(node, input_values, neuron_outputs): # PROVIDED BY THE STAFF\n if isinstance(node, str):\n # A string node (either an input or a neuron)\n if node in input_values:\n return input_values[node]\n if node in neuron_outputs:\n return neuron_outputs[node]\n raise KeyError(\"Node '{}' not found in either the input values or neuron outputs dictionary.\".format(node))\n \n if isinstance(node, (int, float)):\n # A constant input, such as -1\n return node\n \n raise TypeError(\"Node argument is {}; should be either a string or a number.\".format(node))",
"def transform_value(proxy_artifact, transformer_fn):\n transformed = copy.copy(proxy_artifact)\n transformed.__wrapped__ = transformer_fn(transformed)\n return transformed",
"def on_apply(self, node):\n if node.inputs[0].is_constant(Primitive):\n fn = node.inputs[0].value\n conv = MAP.get(fn)\n if conv is not None:\n return conv(self, *node.inputs[1:])\n return relay.Call(self.ref(node.inputs[0]),\n [self.ref(i) for i in node.inputs[1:]])",
"def set_nodes_values(self, node_dict):\n\n # Requires nodes to have type defined in lookup array\n raise Exception(\"Not yet implemented.\")",
"def __call__(self, x, **kwargs):\n x = as_tensor_variable(x)\n return super().__call__(x, dtype=x.dtype, **kwargs)",
"def on_graph(self, node):\n if node.value.parent is None:\n return self.graph_map[node.value]\n if node not in self.node_map:\n self.node_map[node] = self.convert_func(node.value)\n return self.node_map[node]",
"def Map(context, funcname, *nodesets):\n (prefix, local) = ExpandQName(funcname, namespaces=context.processorNss)\n func = (g_extFunctions.get(expanded) or\n CoreFunctions.CoreFunctions.get(expanded, None))\n if not func:\n raise Exception('Dynamically invoked function %s not found.'%funcname)\n flist = [f]*len(nodesets)\n lf = lambda x, f, *args: apply(f, args)\n retlist = apply(map, (lf, flist) + nodesets)\n\n proc = context.processor\n result_nodeset = []\n for ret in retlist:\n proc.pushResult()\n proc.writers[-1].text(Conversions.StringValue(ret))\n frag = proc.popResult()\n context.rtfs.append(frag)\n result_nodeset.append(frag.childNodes[0])\n return result_nodeset",
"def to_var( x):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x)",
"def _pymc_dists_to_value(self, args):\n # This is needed because the scipy rv function transforms\n # every input argument which causes new pymc lambda\n # functions to be generated. Thus, when calling this many\n # many times, excessive amounts of RAM are used.\n new_args = []\n for arg in args:\n if isinstance(arg, pm.Node):\n new_args.append(arg.value)\n else:\n new_args.append(arg)\n\n return new_args",
"def forward(\n self,\n x_dict: Dict[NodeType, Tensor],\n edge_index_dict: Dict[EdgeType, Adj] # Support both.\n ) -> Dict[NodeType, Optional[Tensor]]:\n F = self.out_channels\n H = self.heads\n D = F // H\n\n k_dict, q_dict, v_dict, out_dict = {}, {}, {}, {}\n\n # Compute K, Q, V over node types:\n kqv_dict = self.kqv_lin(x_dict)\n for key, val in kqv_dict.items():\n k, q, v = torch.tensor_split(val, 3, dim=1)\n k_dict[key] = k.view(-1, H, D)\n q_dict[key] = q.view(-1, H, D)\n v_dict[key] = v.view(-1, H, D)\n\n q, dst_offset = self._cat(q_dict)\n k, v, src_offset = self._construct_src_node_feat(\n k_dict, v_dict, edge_index_dict)\n\n edge_index, edge_attr = construct_bipartite_edge_index(\n edge_index_dict, src_offset, dst_offset, edge_attr_dict=self.p_rel)\n\n out = self.propagate(edge_index, k=k, q=q, v=v, edge_attr=edge_attr,\n size=None)\n\n # Reconstruct output node embeddings dict:\n for node_type, start_offset in dst_offset.items():\n end_offset = start_offset + q_dict[node_type].size(0)\n if node_type in self.dst_node_types:\n out_dict[node_type] = out[start_offset:end_offset]\n\n # Transform output node embeddings:\n a_dict = self.out_lin({\n k:\n torch.nn.functional.gelu(v) if v is not None else v\n for k, v in out_dict.items()\n })\n\n # Iterate over node types:\n for node_type, out in out_dict.items():\n out = a_dict[node_type]\n\n if out.size(-1) == x_dict[node_type].size(-1):\n alpha = self.skip[node_type].sigmoid()\n out = alpha * out + (1 - alpha) * x_dict[node_type]\n out_dict[node_type] = out\n\n return out_dict",
"def SBMLTransforms_mapComponentValues(*args):\n return _libsbml.SBMLTransforms_mapComponentValues(*args)",
"def _update(self, update_fn, value, **kwargs):\n input_tensor = ops.convert_to_tensor(\n value, name='value_in_tensor', dtype=self.dtype)\n\n return control_flow_ops.group(\n *tuple(\n _on_device_update(update_fn, v, input_tensor, **kwargs)\n for v in self.variables))",
"def SpssMapToVar(function_name, vars, outvars = None):\n if '%s' not in function_name:\n function_name += \"(%s)\"\n if outvars:\n if len(vars) != len(outvars):\n raise IndexError(\"number of input variables and output \"\n \"variables don't match\")\n else:\n outvars = vars\n syntax = []\n for old, new in zip(vars, outvars):\n rhs = function_name % old\n syntax += [\"compute %(new)s=%(rhs)s.\" % locals()]\n if __debug__:\n print syntax\n spss.Submit(syntax)\n\t# Does not perform EXECUTE",
"def _wrap_fx_args_as_onnxscript_args(\n complete_args: List[fx_type_utils.Argument],\n complete_kwargs: Dict[str, fx_type_utils.Argument],\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n) -> Tuple[\n Sequence[\n Optional[\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n str,\n int,\n float,\n bool,\n list,\n ]\n ]\n ],\n Dict[str, fx_type_utils.Argument],\n]:\n\n onnxscript_args = tuple(\n _retrieve_or_adapt_input_to_graph_set(arg, fx_name_to_onnxscript_value, tracer)\n for arg in complete_args\n )\n onnxscript_kwargs = filter_incompatible_and_dtype_convert_kwargs(complete_kwargs)\n\n return onnxscript_args, onnxscript_kwargs",
"def action_value(self, state, action):\n return self.value_function[to_table_index(state, action)]",
"def Value(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Value(self, *args)"
] | [
"0.60945934",
"0.5849507",
"0.56277806",
"0.55226827",
"0.51107115",
"0.5005654",
"0.49181044",
"0.48638496",
"0.48501047",
"0.48006842",
"0.47544688",
"0.47219574",
"0.46902397",
"0.46803787",
"0.46616042",
"0.46607846",
"0.46585023",
"0.46428233",
"0.4638741",
"0.4633322",
"0.46235868",
"0.4608691",
"0.45823455",
"0.45711404",
"0.45697808",
"0.45549878",
"0.45463327",
"0.45288587",
"0.45261562",
"0.45083964"
] | 0.6808531 | 0 |
Fill the meta information of onnxscript_values with that from the fx FakeTensor. | def _fill_tensor_shape_type(
onnxscript_values: Union[
onnxscript_graph_building.TorchScriptTensor,
Tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
name: str,
expected_values: Union[
fx_type_utils.META_VALUE_TYPE,
List[fx_type_utils.META_VALUE_TYPE],
Tuple[fx_type_utils.META_VALUE_TYPE, ...],
],
):
if isinstance(expected_values, (list, tuple)) and not isinstance(
onnxscript_values, (list, tuple)
):
# ex: aten::split - in onnx_dtype: seq(tensor)
# onnxscript_values is a single tensor, but expected_values is a list of tensors.
return
flat_onnxscript_values, _ = _pytree.tree_flatten(onnxscript_values)
flat_expected_values, _ = _pytree.tree_flatten(expected_values)
for i, (onnxscript_value, expected_value) in enumerate(
zip(flat_onnxscript_values, flat_expected_values)
):
# aten::sym_size output is a int, not a tensor, which stands
# for the size of one dim. We treat it as 0-D tensor.
# TODO(titaiwang): set shape?
if isinstance(expected_value, (torch.SymInt, torch.SymFloat, torch.SymBool)):
onnxscript_value.dtype = fx_type_utils.from_sym_value_to_torch_dtype(
expected_value
)
elif fx_type_utils.is_torch_complex_dtype(expected_value.dtype):
# Like torch.view_as_real, we flatten complex tensors to real tensors with
# additional last dimension of 2
onnxscript_value.shape = (
*[
dim if isinstance(dim, int) else None
for dim in expected_value.size()
],
2,
)
# complex64 -> float32, complex128 -> float64, etc.
onnxscript_value.dtype = fx_type_utils.from_complex_to_float(
expected_value.dtype
)
# Dispatcher needs to know the value is complex
onnxscript_value.is_complex = True
else:
# We set node output sizes to be dynamic to continue the model conversion,
# and inputs are also set to be dynamic in add_input().
onnxscript_value.shape = tuple(
[dim if isinstance(dim, int) else None for dim in expected_value.size()]
)
onnxscript_value.dtype = expected_value.dtype
# naming
if i > 0:
onnxscript_value.name = f"{name}_{i}"
else:
onnxscript_value.name = name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_values(self):\n\n if self.featureType != \"gene\":\n self.transcriptId = self.meta['transcript_id']\n self.transcriptName = self.meta['transcript_name']\n self.transcriptBioType = self.meta['transcript_biotype']\n if self.featureType == 'exon':\n self.exonNum = self.meta['exon_number']\n self.exonId = self.meta['exon_id']\n elif self.featureType == 'CDS' or self.featureType == 'intron':\n self.exonNum = self.meta['exon_number']",
"def _retrieve_or_adapt_input_to_graph_set(\n fx_node_arg: fx_type_utils.Argument,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n):\n\n onnx_tensor = fx_node_arg\n if isinstance(onnx_tensor, torch.fx.Node):\n # 1. fx_node_arg is a torch.fx.Node, which means\n # fx_node_arg stands for the output of that torch.fx.Node.\n # 2. fx_node_arg (variable in torch.fx.Graph) is be mapped to\n # torch.jit.Value, fx_name_to_onnxscript_value[fx_node_arg.name],\n # in TorchScript graph.\n return fx_name_to_onnxscript_value[onnx_tensor.name]\n if isinstance(onnx_tensor, (tuple, list)) and any(\n isinstance(node, torch.fx.Node)\n and isinstance(node.meta.get(\"val\"), torch.SymInt)\n for node in onnx_tensor\n ):\n # This intends to handle dynamic axes. for example, if the input size of op.Expand\n # is dynamic, each dimension would be variable (i.e., sym variable in Pytorch\n # FX graph. Note that sym variable is mapped to tensor in ONNX Script world)\n # calculated by other operators.\n sequence_mixed_elements: List[\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n List[int],\n ]\n ] = []\n for tensor in onnx_tensor:\n if isinstance(tensor, torch.fx.Node) and isinstance(\n tensor.meta.get(\"val\"), torch.SymInt\n ):\n sequence_mixed_elements.append(fx_name_to_onnxscript_value[tensor.name])\n elif isinstance(tensor, int):\n # NOTE: op.Concat doesn't support scalar, so we need to wrap it with\n # dim, and onnx-script will promote it to tensot(int64)\n sequence_mixed_elements.append([tensor])\n # Concat all the elements in the sequence.\n # shapes are mapped to tensors in ONNX graph (TorchScriptGraph),\n # so list of sym_ints is concatenated to a tensor before calling ONNX op.\n\n # For example:\n # inputs: [[2], [4], fx.Node(SymIntA), [1], fx.Node(SymIntB)]\n # outputs: op.Concat([op.Constant(2), op.Constant(4), TorchScriptTensor(A), op.Constant(1), TorchScriptTensor(B)])\n\n # onnx-script auto wraps python number with op.Constants,\n # so we don't need to specifically process them.\n with onnxscript.evaluator.default_as(tracer):\n output = onnxscript.opset18.Concat(*sequence_mixed_elements, axis=0)\n output.dtype = torch.int64\n output.shape = [len(sequence_mixed_elements)]\n return output\n elif isinstance(onnx_tensor, (tuple, list)) and all(\n isinstance(node, torch.fx.Node) or node is None for node in onnx_tensor\n ):\n sequence_elements: List[\n Union[\n Optional[onnxscript_graph_building.TorchScriptTensor],\n Tuple[\n onnxscript_graph_building.TorchScriptTensor,\n ...,\n ],\n ]\n ] = []\n for tensor in onnx_tensor:\n sequence_elements.append(\n fx_name_to_onnxscript_value[tensor.name] if tensor is not None else None\n )\n return sequence_elements\n if isinstance(onnx_tensor, torch.dtype):\n onnx_tensor = int(\n jit_type_utils.JitScalarType.from_dtype(onnx_tensor).onnx_type()\n )\n # NOTE: if device is specified in kwargs (not consumed), it's free to ignored. But\n # if it's in args, we need to set it to string for dispatcher to match schema.\n if isinstance(onnx_tensor, torch.device):\n # torch.device is not supported by onnxscript (no op). We turn it into\n # a string.\n return str(onnx_tensor)\n\n # all other cases, we do nothing.\n return onnx_tensor",
"def var_metadata(self, index):\n if index is not None:\n metadata = []\n for m in self.primary_header['variables'][index]['metadata']:\n meta = {\n 'value': m['Value'] / 10**m['Value precision'],\n 'code': m['Variable-specific code'],\n }\n if 'iMeta' in m:\n meta['iMeta'] = m['iMeta']\n else:\n meta['iMeta'] = 0\n metadata.append(meta)\n return metadata\n else:\n return None",
"def addPhotonVariables(hf, event, data_temp, pho):\n # data_temp[ 0, column_names.index( 'pho_truthPdgId_egam') ] = hf[ 'pho_truthPdgId_egam' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_truthPdgId_atlas') ] = hf[ 'pho_truthPdgId_atlas' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_egamTruthParticle') ] = hf[ 'pho_egamTruthParticle' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_truthType') ] = hf[ 'pho_truthType' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_truthOrigin') ] = hf[ 'pho_truthOrigin' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_isPhotonEMLoose') ] = hf[ 'pho_isPhotonEMLoose' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_isPhotonEMTight') ] = hf[ 'pho_isPhotonEMTight' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_e') ] = hf[ 'pho_e' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_eta') ] = hf[ 'pho_eta' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_phi') ] = hf[ 'pho_phi' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_et') ] = hf[ 'pho_et' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Rhad1') ] = hf[ 'pho_Rhad1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Rhad') ] = hf[ 'pho_Rhad' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_weta2') ] = hf[ 'pho_weta2' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Rphi') ] = hf[ 'pho_Rphi' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Reta') ] = hf[ 'pho_Reta' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_Eratio') ] = hf[ 'pho_Eratio' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_f1') ] = hf[ 'pho_f1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_wtots1') ] = hf[ 'pho_wtots1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_DeltaE') ] = hf[ 'pho_DeltaE' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_weta1') ] = hf[ 'pho_weta1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_fracs1') ] = hf[ 'pho_fracs1' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_ConversionType') ] = hf[ 'pho_ConversionType' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_ConversionRadius') ] = hf[ 'pho_ConversionRadius' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_VertexConvEtOverPt') ] = hf[ 'pho_VertexConvEtOverPt' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_VertexConvPtRatio') ] = hf[ 'pho_VertexConvPtRatio' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_topoetcone20') ] = hf[ 'pho_topoetcone20' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_topoetcone30') ] = hf[ 'pho_topoetcone30' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_topoetcone40') ] = hf[ 'pho_topoetcone40' ][ event][ pho ]\n data_temp[ 0, column_names.index( 'pho_ptvarcone20') ] = hf[ 'pho_ptvarcone20' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_ptvarcone30') ] = hf[ 'pho_ptvarcone30' ][ event][ pho ]\n # data_temp[ 0, column_names.index( 'pho_ptvarcone40') ] = hf[ 'pho_ptvarcone40' ][ event][ pho ]",
"def gen_values(self):",
"def fillCoreVariables(self, tr, event, isMC):\n tr.fill('run', event.input.eventAuxiliary().id().run())\n tr.fill('lumi',event.input.eventAuxiliary().id().luminosityBlock())\n tr.fill('evt', event.input.eventAuxiliary().id().event()) \n tr.fill('isData', 0 if isMC else 1)\n\n# triggerResults = self.handles['TriggerResults'].product()\n# for T,TC in self.triggerBitCheckers:\n# tr.fill(\"HLT_\"+T, TC.check(event.object(), triggerResults))\n\n if not isMC:\n tr.fill('intLumi', getattr(self.cfg_comp,'intLumi',1.0))\n\n if isMC:\n ## xsection, if available\n tr.fill('xsec', getattr(self.cfg_comp,'xSection',1.0))\n ## PU weights, check if a PU analyzer actually filled it\n if hasattr(event,\"nPU\"):\n tr.fill(\"nTrueInt\", event.nPU)\n tr.fill(\"puWeight\", event.puWeight)\n else :\n tr.fill(\"nTrueInt\", -1)\n tr.fill(\"puWeight\", 1.0)\n\n tr.fill(\"genWeight\", self.mchandles['GenInfo'].product().weight())\n ## PDF weights\n if hasattr(event,\"pdfWeights\") :\n for (pdf,nvals) in self.pdfWeights:\n if len(event.pdfWeights[pdf]) != nvals:\n raise RuntimeError(\"PDF lenght mismatch for %s, declared %d but the event has %d\" % (pdf,nvals,event.pdfWeights[pdf]))\n if self.scalar:\n for i,w in enumerate(event.pdfWeights[pdf]):\n tr.fill('pdfWeight_%s_%d' % (pdf,i), w)\n else:\n tr.vfill('pdfWeight_%s' % pdf, event.pdfWeights[pdf])",
"def test_fc(self):\n # These entries exist for both Nodal and VARIANT, but have different values\n # for the same model\n print(self.nhf.metadata.items())\n self.assertEqual(self.nhf.metadata[\"nMom\"], 35)\n self.assertEqual(self.nhf.metadata[\"nscoef\"], 3)\n\n # These entries are only for VARIANT\n self.assertEqual(self.nhf.metadata[\"npcbdy\"], 30)\n self.assertEqual(self.nhf.metadata[\"npcsym\"], 0)\n self.assertEqual(self.nhf.metadata[\"npcsec\"], 0)\n self.assertEqual(self.nhf.metadata[\"iwnhfl\"], 0)\n self.assertEqual(self.nhf.metadata[\"nMoms\"], 0)",
"def _assume_meta(self, new_meta, new_var, old_var):\n meta = self._meta\n n_masks = new_meta['masks']\n n_cols = new_meta['columns']\n n_sets = new_meta['sets']\n n_lib_v = new_meta['lib']['values']\n\n if self.is_array(old_var):\n n_masks[new_var] = org_copy.deepcopy(meta['masks'][old_var])\n n_masks[new_var]['name'] = new_var\n if self._has_categorical_data(old_var):\n n_lib_v[new_var] = meta['lib']['values'][old_var]\n n_sets[new_var] = org_copy.deepcopy(meta['sets'][old_var])\n n_sets['data file']['items'].append('masks@{}'.format(new_var))\n for var in self.sources(old_var):\n new_meta = self._assume_meta(new_meta, var, var)\n else:\n n_cols[new_var] = org_copy.deepcopy(meta['columns'][old_var])\n n_cols[new_var]['name'] = new_var\n if self._is_array_item(old_var):\n if not self._maskname_from_item(old_var) in new_meta['masks']:\n n_cols[new_var]['parent'] = {}\n n_cols[new_var]['values'] = self._get_value_loc(old_var)\n n_sets['data file']['items'].append('columns@{}'.format(new_var))\n else:\n n_sets['data file']['items'].append('columns@{}'.format(new_var))\n\n return new_meta",
"def getVar(inmeta):\n meta = AutoVivification()\n with open(inmeta) as fp:\n for line in fp:\n cols=line.split(',')\n varname=cols[0].strip()\n meta[varname]['agg'] = cols[1].strip()\n meta[varname]['dtyp'] = cols[2].strip()\n meta[varname]['long_name'] = cols[3].strip()\n meta[varname]['units'] = cols[4].strip()\n return meta",
"def _attach_metadata(self):\n self.dataset.create_metadata(\"watertightness\", \"float\", \"1.0 if the mesh is watertight, 0.0 if it is not\")\n self.dataset.attach_metadata_func(\"watertightness\", DexNet.is_watertight, overwrite=False, store_func=True)\n self.dataset.create_metadata(\"num_con_comps\", \"float\", \"Number of connected components (may not be watertight) in the mesh\")\n self.dataset.attach_metadata_func(\"num_con_comps\", object(), overwrite=False, store_func=True)",
"def _setitem_static(x, indices, values):\n from .framework import default_main_program, Variable\n\n if x.type == paddle.fluid.core.VarDesc.VarType.LOD_TENSOR_ARRAY:\n return _setitem_for_tensor_array(x, indices, values)\n\n # step1: parsing the index and recording them\n (\n starts,\n ends,\n steps,\n axes,\n none_axes,\n decrease_axes,\n advanced_index,\n has_advanced_index,\n use_strided_slice,\n ) = parse_index(x, indices)\n\n inputs = {'Input': x}\n attrs = {\n 'axes': axes,\n 'starts': starts,\n 'ends': ends,\n 'steps': steps,\n 'decrease_axes': decrease_axes,\n 'none_axes': none_axes,\n }\n if paddle.utils._contain_var(starts):\n inputs['StartsTensorList'] = paddle.utils._convert_to_tensor_list(\n starts\n )\n del attrs['starts']\n if paddle.utils._contain_var(ends):\n inputs['EndsTensorList'] = paddle.utils._convert_to_tensor_list(ends)\n del attrs['ends']\n if paddle.utils._contain_var(steps):\n inputs['StepsTensorList'] = paddle.utils._convert_to_tensor_list(steps)\n del attrs['steps']\n\n if not has_advanced_index:\n # step2. Parse values\n dtype = x.dtype\n attrs['dtype'] = dtype\n\n from .data_feeder import convert_dtype\n\n if isinstance(values, (bool, int, float, complex)):\n values = np.array([values]).astype(convert_dtype(dtype))\n\n if isinstance(values, np.ndarray):\n shape = list(values.shape)\n values = values.ravel().tolist()\n attrs[\"values\"] = values\n attrs[\"shape\"] = shape\n\n elif isinstance(values, Variable):\n inputs[\"ValueTensor\"] = values\n else:\n raise TypeError(\n \"Only support to assign an integer, float, numpy.ndarray or \"\n \"paddle.Tensor to a paddle.Tensor, but received {}\".format(\n type(values)\n )\n )\n\n # step3.1: Only basic indexing, use OP set_value to set value.\n if paddle.in_dynamic_mode():\n x._bump_inplace_version()\n output = x\n else:\n helper = paddle.fluid.layer_helper.LayerHelper(\n 'set_value', **locals()\n )\n if helper.main_program.current_block_idx != 0:\n # not in global block, we should create a global variable.\n output = helper._create_global_variable_for_type_inference(\n dtype=x.dtype\n )\n else:\n output = helper.create_variable_for_type_inference(\n dtype=x.dtype\n )\n cur_block = default_main_program().current_block()\n cur_block.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': output},\n attrs=attrs,\n inplace_map={\"Input\": \"Out\"},\n )\n\n if not paddle.in_dynamic_mode():\n # map var to the new output\n paddle.jit.api.ProgramTranslator.get_instance()._params_map.add(\n cur_block.program, x.desc.id(), output\n )\n return output\n else:\n # step3.2: Case for there are advanced indexing.\n # 1. get __getitem__ result of basic indexing;\n # 2. transpose original tensor so that the axis with advanced indexing will come to the first;\n # 3. assign values to the sliced result by index_put OP;\n # 4. 
transpose back and assign the result to original tensor by set_value OP.\n\n sub_tensor = get_tensor_with_basic_indexing(\n x,\n axes,\n starts,\n ends,\n steps,\n decrease_axes,\n none_axes,\n use_strided_slice,\n )\n (\n transed_sub_tensor,\n adjusted_advanced_index,\n transback_dim,\n _,\n _,\n ) = deal_advanced_index(sub_tensor, advanced_index, True)\n if not isinstance(values, Variable):\n values = paddle.assign(values).astype(transed_sub_tensor.dtype)\n transed_sub_tensor = transed_sub_tensor.index_put(\n adjusted_advanced_index, values\n )\n\n # NOTE(zoooo0820): now basic indexing of __getitem__ will return a new Tensor both in dynamic and static mode\n # After strided is ready and basic indexing returns view of Tensor in dynamic mode. The code shoule be changed\n # for dynamic mode.\n if paddle.in_dynamic_mode():\n transed_sub_tensor.index_put_(adjusted_advanced_index, values)\n else:\n transed_sub_tensor = transed_sub_tensor.index_put(\n adjusted_advanced_index, values\n )\n\n transback_sub_tensor = transed_sub_tensor.transpose(transback_dim)\n\n inputs[\"ValueTensor\"] = transback_sub_tensor\n if paddle.in_dynamic_mode():\n x._bump_inplace_version()\n output = x\n else:\n helper = paddle.fluid.layer_helper.LayerHelper(\n 'set_value', **locals()\n )\n if helper.main_program.current_block_idx != 0:\n # not in global block, we should create a global variable.\n output = helper._create_global_variable_for_type_inference(\n dtype=x.dtype\n )\n else:\n output = helper.create_variable_for_type_inference(\n dtype=x.dtype\n )\n cur_block = default_main_program().current_block()\n cur_block.append_op(\n type=\"set_value\",\n inputs=inputs,\n outputs={'Out': output},\n attrs=attrs,\n inplace_map={\"Input\": \"Out\"},\n )\n if not paddle.in_dynamic_mode():\n # map var to the new output\n paddle.jit.api.ProgramTranslator.get_instance()._params_map.add(\n cur_block.program, x.desc.id(), output\n )\n return output",
"def _update_metadata_imagedata(metadata, out_filebase, i):\n metadata['FITSImageFilename'] = [out_filebase + FITS_EXT]\n metadata['PNGImageFileName'] = [out_filebase + PNG_EXT]\n metadata['PNGThumbNailFileName'] = [out_filebase + '_tnail' + PNG_EXT]\n\n image_keys = [\"IntegrationTime\", \"RightAscension\", \"Declination\",\n \"DecRa\", \"Targets\", \"KatpointTargets\"]\n for key in image_keys:\n metadata[key] = [metadata[key][i]]",
"def _wrap_fx_args_as_onnxscript_args(\n complete_args: List[fx_type_utils.Argument],\n complete_kwargs: Dict[str, fx_type_utils.Argument],\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n) -> Tuple[\n Sequence[\n Optional[\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n str,\n int,\n float,\n bool,\n list,\n ]\n ]\n ],\n Dict[str, fx_type_utils.Argument],\n]:\n\n onnxscript_args = tuple(\n _retrieve_or_adapt_input_to_graph_set(arg, fx_name_to_onnxscript_value, tracer)\n for arg in complete_args\n )\n onnxscript_kwargs = filter_incompatible_and_dtype_convert_kwargs(complete_kwargs)\n\n return onnxscript_args, onnxscript_kwargs",
"def t_metadata(self):\n index = self.var_index()\n return self.var_metadata(index)",
"def to_jit(self, *values, **kwargs):\n constraints = {f\"x{i}\": v.tensor_type_dims\n for i, v in enumerate(values)}\n if self.output_types is not None:\n constraints.update(self.output_types)\n inputs = [Input(f\"x{i}\") for i in range(len(values))]\n var = self.f(*inputs, **kwargs)\n onx = var.to_onnx(constraints=constraints,\n target_opsets=self.target_opsets,\n ir_version=self.ir_version)\n names = [f\"x{i}\" for i in range(len(values))]\n exe = self.tensor_class.create_function(names, onx)\n return onx, exe",
"def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value",
"def run(\n self,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n parent_onnxscript_graph: Optional[\n onnxscript_graph_building.TorchScriptGraph\n ] = None,\n ) -> onnxscript_graph_building.TorchScriptGraph:\n onnxscript_graph = onnxscript_graph_building.TorchScriptGraph(\n parent_onnxscript_graph\n )\n onnxscript_tracer = onnxscript_graph_building.TorchScriptTracingEvaluator(\n onnxscript_graph\n )\n # In the following loop, a TorchScript graph is created to\n # represent the input FX graph with ONNX symbols (e.g., onnx::add).\n # To connect the values to nodes in the TorchScript graph, we maintian\n # fx_name_to_onnxscript_value. Basically, we want to translate\n # fx_tensor_x (type: torch.fx.Node) -> fx_node_1 -> fx_tensor_y (type: torch.fx.Node)\n # to\n # fx_name_to_onnxscript_value[fx_tensor_x.name] -> onnx_node_1 -> fx_name_to_onnxscript_value[fx_tensor_y.name]\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ] = {}\n\n # TODO: Fix FakeTensorMode limitation asap\n # We want to pass list of ints and floats to TorchScript graph correctly\n # in _export_fx_to_ts, so we must disable FakeTensorMode. Otherwise, graph may\n # receive FakeTensor and results runtime error. In addition, TorchScript-based\n # ONNX exporter used in _ts_graph_to_onnx_model_in_protobuf is not compatible\n # with FakeTensorMode.\n with torch.utils._mode_utils.no_dispatch():\n # node_fixed_shape is only used on op_level_debug purpose.\n for node in fx_graph_module.graph.nodes:\n self.run_node(\n node,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n onnxscript_graph,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n )\n\n return onnxscript_graph",
"def values(self, values):\n self.data.values = values",
"def _post_process(self):\n # merge extendedMetadata into metadata\n if 'instance' in self._metadata and self._metadata['instance'] is not None:\n if 'metadata' in self._metadata['instance']:\n if 'extendedMetadata' in self._metadata['instance']:\n v = self._metadata['instance'].pop('extendedMetadata')\n self._metadata['instance']['metadata'].update(v)\n else:\n if 'extendedMetadata' in self._metadata['instance']:\n v = self._metadata.pop('extendedMetadata')\n self._metadata['metadata'] = v\n\n # change vnic's id to vnicId\n if 'vnics' in self._metadata:\n for i in range(len(self._metadata['vnics'])):\n v = self._metadata['vnics'][i].pop('id')\n self._metadata['vnics'][i]['vnicId'] = v",
"def __init__(self, f, index_value_pairs):\n # super(FixVariables, self).__init__(\n ComposedFunction.__init__(self, [f, self.insert_variables])\n self.index_value_pairs = dict(index_value_pairs)",
"def set_variable_values(self, vars_values):\n raise NotImplementedError()",
"def set_tensor_data(self, data: dict) -> None:\n assert isinstance(data,\n dict), f'data should be a `dict` but got {data}'\n for k, v in data.items():\n if k == 'gt_label':\n self.set_gt_label(v)\n elif k == 'prompt':\n self.set_field(v, k, dtype=(str, list))\n else:\n self.set_field(all_to_tensor(v), k, dtype=torch.Tensor)",
"def test_metadata_to_imu_0(self):\n data = ET.parse(\"data/metadata_0.xml\")\n data_str = ET.tostring(data.getroot())\n\n dict = tesse_ros_bridge.utils.parse_metadata(data_str)\n proc_dict = tesse_ros_bridge.utils.process_metadata(dict, 0, [0,0,0], np.identity(3))\n\n imu = tesse_ros_bridge.utils.metadata_to_imu(proc_dict, 0, \"f\")\n\n self.assertEqual(imu.header.frame_id, \"f\")\n self.assertEqual(imu.header.stamp, 0)\n self.assertEqual(imu.angular_velocity.x, proc_dict['ang_vel'][0])\n self.assertEqual(imu.angular_velocity.y, proc_dict['ang_vel'][1])\n self.assertEqual(imu.angular_velocity.z, proc_dict['ang_vel'][2])\n self.assertEqual(imu.linear_acceleration.x,\n proc_dict['acceleration'][0])\n self.assertEqual(imu.linear_acceleration.y,\n proc_dict['acceleration'][1] - 9.81)\n self.assertEqual(imu.linear_acceleration.z,\n proc_dict['acceleration'][2])\n\n # TODO(marcus): add checks on angular velocity between two frames",
"def values():",
"def update_meta_data(s, m_list, m_codes):\n # TODO(elia): This is not data about the data (=metadata) but data about component models\n\n s['clf_labels'] = collect_and_verify_clf_classlabels(m_list, m_codes)\n s['FI'] = collect_feature_importances(m_list, m_codes)\n\n return s",
"def init_meta(self):\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=1, card='RA')\n meta['dec'] = dict(ext=1, card='DEC')\n meta['target'] = dict(ext=1, card='OBJECT')\n meta['decker'] = dict(ext=1, card='APERTURE')\n meta['dichroic'] = dict(ext=1, card='FILTER')\n meta['binning'] = dict(ext=1, card=None, default='1,1')\n\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=1, card='EXPTIME')\n meta['airmass'] = dict(ext=1, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=1, card='DISPERSE')\n meta['idname'] = dict(ext=1, card='IMAGETYP')\n\n # Ingest\n self.meta = meta",
"def postprocessData(meta, units, data):\n\n data['time'] = np.arange(0, meta['dt'] * len(data), meta['dt'])\n units['time'] = 's'\n\n meta, units, data = self.calculateForce(meta, units, data)\n\n data['distance'] = np.sqrt(data.xDist**2 + data.yDist**2)\n units['distance'] = 'nm'\n\n return meta, units, data",
"def populateMeta(self, *args):\n meta = self._getAllMeta()\n if not meta:\n raise MetaReadError(\"Error Reading Image MetaData, has image finished copying?\")\n else:\n self.exifKeys = self._getAllMetaKeys(meta)\n for key in self.exifKeys:\n if key == self._getExifKey_TimeCode():\n tag = meta[self._getExifKey_TimeCode()]\n self.startTimecode = tag.raw_value\n self._splitTimecode()\n \n if args:\n for arg in args:\n try:\n lTag = meta[arg]\n self.__dict__[arg.split('.')[1] + '_' + arg.split('.')[2]] = lTag.raw_value\n except:\n print 'could not get meta for tag ', arg",
"def set_metadata(self, chunk, coords, value):\n\n chunk.set_metadata(coords, value)",
"def set_properties(self):\n self.exh.rho_array = np.empty(np.size(self.exh.T_array))\n self.exh.mu_array = np.empty(np.size(self.exh.T_array))\n for i in range(np.size(self.exh.T_array)):\n self.exh.T = self.exh.T_array[i]\n self.exh.set_TempPres_dependents()\n self.exh.rho_array[i] = self.exh.rho\n self.exh.mu_array[i] = self.exh.mu"
] | [
"0.60349166",
"0.5497529",
"0.5374807",
"0.5361385",
"0.5248724",
"0.5213704",
"0.5072275",
"0.5041288",
"0.50263256",
"0.5021723",
"0.50011504",
"0.49802682",
"0.48824126",
"0.48571992",
"0.48536652",
"0.4850609",
"0.48376772",
"0.47934875",
"0.4792228",
"0.4781153",
"0.4781021",
"0.47563714",
"0.4738805",
"0.47348046",
"0.47139525",
"0.47109595",
"0.46808738",
"0.4679963",
"0.4677565",
"0.46746922"
] | 0.5958788 | 1 |
Export a fx.GraphModule submodule to ONNXScript graph. The export process specifically targets `call_module` nodes that are created by the exporter's `Modularize` pass. Each `call_module` node has an associated fx.GraphModule, accessible via `node.target`, underneath the root fx.GraphModule. These `call_module` nodes are exported as ONNX function nodes. The related `sub_module` is then exported as an ONNX model local function, which is represented by another `TorchScriptGraph`. This `TorchScriptGraph` sets the current `onnxscript_graph` as its parent. | def call_module(
self,
node: torch.fx.Node,
parent_onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,
fx_name_to_onnxscript_value: Dict[
str,
Union[
onnxscript_graph_building.TorchScriptTensor,
Tuple[onnxscript_graph_building.TorchScriptTensor, ...],
],
],
tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,
root_fx_graph_module: torch.fx.GraphModule,
onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,
op_level_debug: bool,
) -> None:
assert isinstance(
node.target, str
), f"node.target must be a str, not {type(node.target)} for node {node}."
sub_module = root_fx_graph_module.get_submodule(node.target)
assert isinstance(
sub_module, torch.fx.GraphModule
), f"sub_module must be a torch.fx.GraphModule, not {type(sub_module)} for node {node}."
sub_onnxscript_graph = self.run(
sub_module, onnxfunction_dispatcher, op_level_debug, parent_onnxscript_graph
)
onnx_args, _ = _wrap_fx_args_as_onnxscript_args(
list(node.args), {}, fx_name_to_onnxscript_value, tracer
)
# TODO: We may want to consider other naming styles. The goal is to be stable and
# unique such that it can be easily identified in case of kernel substitution.
# Example for current style is combination of qualified module class name and
# module attribute name: `torch_nn_modules_conv_Conv2d_conv1`.
# Other naming styles such as qualified module class name made unique can also
# be considered.
unique_module_name = f"{sub_module._get_name()}_{node.target}"
outputs: Union[ # type: ignore[no-redef]
onnxscript_graph_building.TorchScriptTensor,
Tuple[onnxscript_graph_building.TorchScriptTensor, ...],
] = parent_onnxscript_graph.add_module_call(
unique_module_name, sub_onnxscript_graph, onnx_args
)
assert isinstance(
outputs, (onnxscript_graph_building.TorchScriptTensor, tuple)
), f"Unexpected outputs type {type(outputs)} for node {node}."
_fill_tensor_shape_type(outputs, node.name, node.meta["val"])
fx_name_to_onnxscript_value[node.name] = outputs
# Skip op_level_validation for call_module. Subgraph nodes are validated individually. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(\n self,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n parent_onnxscript_graph: Optional[\n onnxscript_graph_building.TorchScriptGraph\n ] = None,\n ) -> onnxscript_graph_building.TorchScriptGraph:\n onnxscript_graph = onnxscript_graph_building.TorchScriptGraph(\n parent_onnxscript_graph\n )\n onnxscript_tracer = onnxscript_graph_building.TorchScriptTracingEvaluator(\n onnxscript_graph\n )\n # In the following loop, a TorchScript graph is created to\n # represent the input FX graph with ONNX symbols (e.g., onnx::add).\n # To connect the values to nodes in the TorchScript graph, we maintian\n # fx_name_to_onnxscript_value. Basically, we want to translate\n # fx_tensor_x (type: torch.fx.Node) -> fx_node_1 -> fx_tensor_y (type: torch.fx.Node)\n # to\n # fx_name_to_onnxscript_value[fx_tensor_x.name] -> onnx_node_1 -> fx_name_to_onnxscript_value[fx_tensor_y.name]\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ] = {}\n\n # TODO: Fix FakeTensorMode limitation asap\n # We want to pass list of ints and floats to TorchScript graph correctly\n # in _export_fx_to_ts, so we must disable FakeTensorMode. Otherwise, graph may\n # receive FakeTensor and results runtime error. In addition, TorchScript-based\n # ONNX exporter used in _ts_graph_to_onnx_model_in_protobuf is not compatible\n # with FakeTensorMode.\n with torch.utils._mode_utils.no_dispatch():\n # node_fixed_shape is only used on op_level_debug purpose.\n for node in fx_graph_module.graph.nodes:\n self.run_node(\n node,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n onnxscript_graph,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n )\n\n return onnxscript_graph",
"def export_finn_onnx(module, input_shape, export_path, input_t = None,\n torch_onnx_kwargs = {}):\n if onnx is None or opt is None:\n raise ModuleNotFoundError(\"Installation of ONNX is required.\")\n\n with torch.no_grad():\n # TODO maybe consider a deepcopy of the module first?\n module = module.eval()\n if input_t is None:\n input_t = torch.empty(input_shape, dtype=torch.float)\n # do a forward pass with the dummy input to e.g. store per-layer input\n # and output shapes\n output_t = module.forward(input_t)\n # override any given input_t to make sure it's a standard PyTorch tensor\n input_t = torch.empty(input_shape, dtype=torch.float)\n # enable export mode and call export\n _prepare_for_finn_onnx_export(module, enable_export = True)\n torch.onnx.export(module, input_t, export_path, **torch_onnx_kwargs)\n # restore the model to non-export mode to keep it clean\n _prepare_for_finn_onnx_export(module, enable_export = False)\n # do some cleanup on the exported ONNX model\n model = onnx.load(export_path)\n onnx_passes = [\n # use initializers instead of Constant nodes for fixed params\n \"extract_constant_to_initializer\",\n # remove unused graph inputs (e.g. zero_hw_sentinel) & initializers\n \"eliminate_unused_initializer\"\n ]\n model = opt.optimize(model, onnx_passes)\n model = _move_quant_attributes_into_annotations(model)\n model = _move_domain_attributes_into_domain(model)\n onnx.save(model, export_path)",
"def _prepare_for_finn_onnx_export(module, enable_export = True):\n\n try:\n module.export_mode = enable_export\n except AttributeError:\n # module does not have the prepare_for_export function, skip\n pass\n for c in module.children():\n _prepare_for_finn_onnx_export(c, enable_export)",
"def run_node(\n self,\n node,\n fx_graph_module: torch.fx.GraphModule,\n onnxfunction_dispatcher: onnxfunction_dispatcher.OnnxFunctionDispatcher,\n op_level_debug: bool,\n onnxscript_graph: onnxscript_graph_building.TorchScriptGraph,\n onnxscript_tracer: onnxscript_graph_building.TorchScriptTracingEvaluator,\n fx_name_to_onnxscript_value: Dict[\n str,\n Union[\n onnxscript_graph_building.TorchScriptTensor,\n Tuple[onnxscript_graph_building.TorchScriptTensor, ...],\n ],\n ],\n ):\n # Record stack trace of node in diagnostic.\n node_stack_trace = node.stack_trace\n if node_stack_trace:\n diagnostic = self.diagnostic_context.inflight_diagnostic(\n rule=diagnostics.rules.fx_node_to_onnx\n )\n diagnostic.with_additional_message(\n f\"### PyTorch source information\\n```\\n{node_stack_trace}\\n```\"\n )\n location = _location_from_fx_stack_trace(node_stack_trace)\n if location is not None:\n diagnostic.with_location(location)\n\n if node.op == \"placeholder\":\n self.placeholder(node, onnxscript_graph, fx_name_to_onnxscript_value)\n elif node.op == \"get_attr\":\n self.get_attr(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n fx_graph_module,\n )\n elif node.op == \"call_function\":\n self.call_function(\n node,\n onnxscript_tracer,\n fx_name_to_onnxscript_value,\n onnxfunction_dispatcher,\n op_level_debug,\n fx_graph_module,\n )\n elif node.op == \"call_method\":\n self.call_method(node)\n elif node.op == \"call_module\":\n self.call_module(\n node,\n onnxscript_graph,\n fx_name_to_onnxscript_value,\n onnxscript_tracer,\n fx_graph_module,\n onnxfunction_dispatcher,\n op_level_debug,\n )\n elif node.op == \"output\":\n self.output(node, onnxscript_graph, fx_name_to_onnxscript_value)\n else:\n raise RuntimeError(f\"Found node type not defined in torch.fx: {node.op}\")",
"def _onnx_graph_from_model(\n model: Union[torch.nn.Module, torch.jit.ScriptModule],\n args: Tuple[Any, ...],\n kwargs: Mapping[str, Any],\n export_options: _experimental.ExportOptions,\n) -> _C.Graph:\n # TODO: refactor utils.py to remove duplicated code of context setup. See #78834\n opset_version = export_options.opset_version\n operator_export_type = export_options.operator_export_type\n export_modules_as_functions = export_options.export_modules_as_functions\n training = export_options.training\n verbose = export_options.verbose\n dynamic_axes = export_options.dynamic_axes\n input_names = export_options.input_names\n output_names = export_options.output_names\n\n if opset_version is None:\n opset_version = _constants.ONNX_DEFAULT_OPSET\n\n utils._setup_trace_module_map(model, export_modules_as_functions)\n\n if not operator_export_type:\n if _C_onnx._CAFFE2_ATEN_FALLBACK:\n operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK\n else:\n operator_export_type = _C_onnx.OperatorExportTypes.ONNX\n\n GLOBALS.export_onnx_opset_version = opset_version\n GLOBALS.operator_export_type = operator_export_type\n\n with utils.exporter_context(model, training, verbose):\n do_constant_folding = utils._decide_constant_folding(\n export_options.do_constant_folding, operator_export_type, training\n )\n\n if dynamic_axes is None:\n dynamic_axes = {}\n utils._validate_dynamic_axes(dynamic_axes, model, input_names, output_names)\n\n export_inputs = _prepare_input_for_export(args, kwargs)\n export_inputs = utils._decide_input_format(model, export_inputs)\n onnx_graph, _, _ = utils._model_to_graph(\n model,\n export_inputs,\n verbose,\n input_names,\n output_names,\n operator_export_type,\n do_constant_folding,\n training=training,\n dynamic_axes=dynamic_axes,\n )\n\n return onnx_graph",
"def prepare_for_onnx_export_(self, **kwargs):\n seen = set()\n\n def apply_prepare_for_onnx_export_(module):\n if (\n module != self\n and hasattr(module, \"prepare_for_onnx_export_\")\n and module not in seen\n ):\n seen.add(module)\n module.prepare_for_onnx_export_(**kwargs)\n\n self.apply(apply_prepare_for_onnx_export_)",
"def __init__(self, graph,\n nn_module='MLP',\n nn_layers=1,\n nn_mid_units=128,\n nn_mid_acti=tf.tanh,\n nn_out_units=1,\n nn_out_acti=None,\n ignore_nodetype=True,\n name='node_fn'):\n self.graph = graph\n self.nn_module = nn_module\n self.nn_layers = nn_layers\n self.nn_mid_units = nn_mid_units\n self.nn_mid_acti = nn_mid_acti\n self.nn_out_units = nn_out_units\n self.nn_out_acti = nn_out_acti\n self.ignore_nodetype = ignore_nodetype\n self.name = name\n\n self.reuse = None",
"def __init__(self, graph,\n nn_module='MLP',\n nn_layers=1,\n nn_mid_units=128,\n nn_mid_acti=tf.tanh,\n nn_out_units=1,\n nn_out_acti=None,\n ignore_receiver=False,\n ignore_edgetype=False,\n use_interacted_feature=False,\n name='edge_fn'):\n self.graph = graph\n self.nn_module = nn_module\n self.nn_layers = nn_layers\n self.nn_mid_units = nn_mid_units\n self.nn_mid_acti = nn_mid_acti\n self.nn_out_units = nn_out_units\n self.nn_out_acti = nn_out_acti\n self.ignore_receiver = ignore_receiver\n self.ignore_edgetype = ignore_edgetype\n self.use_interacted_feature = use_interacted_feature\n self.name = name\n\n self.reuse = None",
"def gen_graph_functions(env: jinja2.environment.Environment, main_graph: onnx.GraphProto) -> ([str], str, [str]):\n\n main_function_node_scripts = []\n sub_graph_functions = []\n generated_imports = set() # set to avoid duplicate imports\n\n node_tree = onnx_helper.NodeTree(main_graph.node)\n available_outputs = [o.name for o in list(main_graph.output)]\n\n while len(node_tree.nodes) != 0:\n current_lowest_nodes = node_tree.end_nodes\n\n # Find next operation to insert -> check if all outputs are available\n next_tree_node = None\n for tree_node in current_lowest_nodes:\n if all(output in available_outputs for output in list(tree_node.node.output)):\n next_tree_node = tree_node\n break\n if not next_tree_node:\n raise Exception(\"Error in parsing nodes, did not find a next node to compute\")\n\n # Insert generated parts\n generated_node = gen_node_script(env, main_graph, next_tree_node.node)\n generated_imports.update(generated_node.imports)\n main_function_node_scripts.append(generated_node.dml_script)\n # handle sub-graphs\n for sub_graph in generated_node.sub_graphs:\n sub_graph_imports, sub_graph_main_function, sub_graph_sub_graph_functions = \\\n gen_graph_functions(env, sub_graph)\n # Inherit imports\n generated_imports.update(sub_graph_imports)\n # Inherit sub-graph functions of sub-graph\n sub_graph_functions += sub_graph_sub_graph_functions\n # Sub-graph main-function becomes sub-graph function\n sub_graph_functions.append(sub_graph_main_function)\n\n # After insertion the inputs to the node become available and the node is removed\n available_outputs += list(next_tree_node.node.input)\n node_tree.remove_end_node(next_tree_node)\n\n main_function_node_scripts.reverse()\n main_graph_function = render_function(env, main_graph, main_function_node_scripts)\n return list(generated_imports), main_graph_function, sub_graph_functions",
"def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)",
"def export_onnx_model(model, inputs, passes):\n assert isinstance(model, torch.nn.Module)\n\n # make sure all modules are in eval mode, onnx may change the training\n # state of the module if the states are not consistent\n def _check_eval(module):\n assert not module.training\n\n model.apply(_check_eval)\n\n # Export the model to ONNX\n with torch.no_grad():\n with io.BytesIO() as f:\n torch.onnx.export(\n model,\n inputs,\n f,\n operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,\n # verbose=True, # NOTE: uncomment this for debugging\n # export_params=True,\n )\n onnx_model = onnx.load_from_string(f.getvalue())\n\n # Apply ONNX's Optimization\n if passes is not None:\n all_passes = optimizer.get_available_passes()\n assert all(p in all_passes for p in passes), \\\n 'Only {} are supported'.format(all_passes)\n onnx_model = optimizer.optimize(onnx_model, passes)\n return onnx_model",
"def export_onnx(self, export_path) -> None:\n EVAL_MAX_CLICKS = self.net_clicks_limit\n POINT_LENGTH = EVAL_MAX_CLICKS * 2\n HEIGHT, WIDTH = self.input_size\n\n # NOTE: dim=0: orig_img + flip_img = 2\n _image = torch.randn(2, 3, HEIGHT, WIDTH,\n device=self.device,\n dtype=torch.float32)\n _points = torch.ones(2, POINT_LENGTH, 2,\n device=self.device,\n dtype=torch.int32)\n\n # Providing input and output names sets the display names for values\n # within the model's graph. Setting these does not change the semantics\n # of the graph; it is only for readability.\n #\n # The inputs to the network consist of the flat list of inputs (i.e.\n # the values you would pass to the forward() method) followed by the\n # flat list of parameters. You can partially specify names, i.e. provide\n # a list here shorter than the number of inputs to the model, and we will\n # only set that subset of names, starting from the beginning.\n input_names = [ \"image\" ] + [ \"points\" ]\n output_names = [ \"output\"]\n\n # NOTE: Dynamic Axes make input dimension dynamic.\n dynamic_axes = {'points': {1: 'num_pts'}}\n\n # NOTE: Paramters Explanation\n # * args: input arguments. Wrap multiple inputs as tuple.\n # * f: path where the ONNX model is exported.\n # * do_constant_folding: enable constant-folding optimization\n # * input_names: setup input names as a list of string\n # * output_names: setup output names as a list of string\n # * opset_version: opset version of ONNX model. Latest one is recommended.\n # * operator_export_type:\n # * OperatorExportTypes.ONNX: normal mode\n # * OperatorExportTypes.ONNX_ATEN_FALLBACK: check 'ATen' node in debug mode\n # * dynamic_axes: define dynamic dimension inputs\n torch.onnx.export(self.net,\n args=(_image, _points),\n f=export_path,\n export_params=True,\n do_constant_folding=True,\n verbose=True,\n input_names=input_names,\n output_names=output_names,\n opset_version=12,\n operator_export_type=torch.onnx.OperatorExportTypes.ONNX,\n dynamic_axes=dynamic_axes)",
"def write_module_js(output_root):\r\n return _write_js(output_root, _list_modules())",
"def _process_call(self, node: ast.Call) -> None:\n func = node.func\n if self._is_export_call(func):\n func = cast(ast.Call, func)\n # tf_export(...)(id)\n if len(node.args) != 1 or node.keywords:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' called with a single value: {ast.dump(node)}'\n )\n symbol = self._name(self._unwrap_simple_call(node.args[0]))\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' called with a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(func, symbol)\n elif (\n isinstance(func, ast.Attribute)\n and func.attr == 'export_constant'\n and self._is_export_call(func.value)\n ):\n # tf_export(...).export_constant(__name__, id)\n if (\n len(node.args) != 2\n or node.keywords\n or self._name(node.args[0]) != '__name__'\n ):\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export_constant must be'\n f' called with __name__, <id>: {ast.dump(node)}'\n )\n self._add_exported_symbol(func.value, self._literal_value(node.args[1]))\n else:\n self.visit(node)",
"def export_onnx(graph: Graph, do_type_check=True, **kwargs) -> \"onnx.ModelProto\":\n from onnx_graphsurgeon.exporters.onnx_exporter import OnnxExporter\n import onnx\n\n onnx_graph = OnnxExporter.export_graph(graph, do_type_check=do_type_check)\n\n if \"opset_imports\" not in kwargs:\n kwargs[\"opset_imports\"] = [onnx.helper.make_opsetid(\"\", graph.opset)]\n\n return onnx.helper.make_model(onnx_graph, **kwargs)",
"def add_module(self, module):\n self.msg(3, 'add_module', module)\n\n # If no node exists for this module, add such a node.\n module_added = self.findNode(module.identifier)\n if module_added is None:\n self.addNode(module)\n else:\n assert module == module_added, 'New module %r != previous %r.' % (module, module_added)\n\n # If this module has a previously added parent, reference this module to\n # its parent and add this module to its parent's namespace.\n parent_name, _, module_basename = module.identifier.rpartition('.')\n if parent_name:\n parent = self.findNode(parent_name)\n if parent is None:\n self.msg(4, 'add_module parent not found:', parent_name)\n else:\n self.createReference(module, parent)\n parent.add_submodule(module_basename, module)",
"def addOutputModule(self, moduleName, dataTier, **params):\n logging.info(\"Called addOutputModule with %s , %s , %s\" % (moduleName, dataTier, params))\n\n import FWCore.ParameterSet.Config as cms\n\n outputModule = cms.OutputModule(\n \"PoolOutputModule\",\n fileName = cms.untracked.string(\"%s.root\" % moduleName)\n )\n\n outputModule.dataset = cms.untracked.PSet(dataTier = cms.untracked.string(dataTier))\n\n## if params['compressionLevel'] != None:\n## outputModule.compressionLevel = cms.untracked.int32(params['compressionLevel'])\n\n if params['setEventContentInOutput']:\n if ( dataTier == 'ALCARECO' ) and \\\n ( ( self.cmssw['version1'] > 3 ) \\\n or ( self.cmssw['version1'] == 3 and self.cmssw['version2'] > 2 ) \\\n or ( self.cmssw['version1'] == 3 and self.cmssw['version2'] == 2 and self.cmssw['version3'] >= 0 ) ):\n outputCommands = cms.untracked.vstring(\n 'drop *',\n 'keep edmTriggerResults_*_*_*',\n 'keep *_ALCARECOTkAlCosmicsCTF_*_*',\n 'keep *_ALCARECOTkAlCosmicsCosmicTF_*_*',\n 'keep *_ALCARECOTkAlCosmicsRS_*_*',\n 'keep *_eventAuxiliaryHistoryProducer_*_*',\n 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',\n 'keep L1MuGMTReadoutCollection_gtDigis_*_*',\n 'keep Si*Cluster*_si*Clusters_*_*',\n 'keep *_MEtoEDMConverter_*_*',\n 'keep *_ALCARECOTkAlCosmicsCTF_*_*',\n 'keep *_ALCARECOTkAlCosmicsCosmicTF_*_*',\n 'keep *_ALCARECOTkAlCosmicsRS_*_*',\n 'keep *_eventAuxiliaryHistoryProducer_*_*',\n 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',\n 'keep L1MuGMTReadoutCollection_gtDigis_*_*',\n 'keep Si*Cluster*_si*Clusters_*_*',\n 'keep *_MEtoEDMConverter_*_*',\n 'keep *_ALCARECOTkAlCosmics*0T_*_*',\n 'keep *_eventAuxiliaryHistoryProducer_*_*',\n 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',\n 'keep L1MuGMTReadoutCollection_gtDigis_*_*',\n 'keep Si*Cluster*_si*Clusters_*_*',\n 'keep *_MEtoEDMConverter_*_*',\n 'keep *_ALCARECOTkAlCosmics*0T_*_*',\n 'keep *_eventAuxiliaryHistoryProducer_*_*',\n 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',\n 'keep L1MuGMTReadoutCollection_gtDigis_*_*',\n 'keep Si*Cluster*_si*Clusters_*_*',\n 'keep *_MEtoEDMConverter_*_*',\n 'keep *_ALCARECOSiStripCalZeroBias_*_*',\n 'keep *_calZeroBiasClusters_*_*',\n 'keep *_MEtoEDMConverter_*_*',\n 'keep HOCalibVariabless_*_*_*',\n 'keep *_ALCARECOMuAlStandAloneCosmics_*_*',\n 'keep *_muonCSCDigis_*_*',\n 'keep *_muonDTDigis_*_*',\n 'keep *_muonRPCDigis_*_*',\n 'keep *_dt1DRecHits_*_*',\n 'keep *_dt2DSegments_*_*',\n 'keep *_dt4DSegments_*_*',\n 'keep *_csc2DRecHits_*_*',\n 'keep *_cscSegments_*_*',\n 'keep *_rpcRecHits_*_*',\n 'keep *_ALCARECOMuAlGlobalCosmics_*_*',\n 'keep *_muonCSCDigis_*_*',\n 'keep *_muonDTDigis_*_*',\n 'keep *_muonRPCDigis_*_*',\n 'keep *_dt1DRecHits_*_*',\n 'keep *_dt2DSegments_*_*',\n 'keep *_dt4DSegments_*_*',\n 'keep *_csc2DRecHits_*_*',\n 'keep *_cscSegments_*_*',\n 'keep *_rpcRecHits_*_*',\n 'keep *_ALCARECOMuAlCalIsolatedMu_*_*',\n 'keep *_muonCSCDigis_*_*',\n 'keep *_muonDTDigis_*_*',\n 'keep *_muonRPCDigis_*_*',\n 'keep *_dt1DRecHits_*_*',\n 'keep *_dt2DSegments_*_*',\n 'keep *_dt4DSegments_*_*',\n 'keep *_csc2DRecHits_*_*',\n 'keep *_cscSegments_*_*',\n 'keep *_rpcRecHits_*_*',\n 'keep *_muonDTDigis_*_*',\n 'keep CSCDetIdCSCWireDigiMuonDigiCollection_*_*_*',\n 'keep CSCDetIdCSCStripDigiMuonDigiCollection_*_*_*',\n 'keep DTLayerIdDTDigiMuonDigiCollection_*_*_*',\n 'keep *_dt4DSegments_*_*',\n 'keep *_cscSegments_*_*',\n 'keep *_rpcRecHits_*_*',\n 'keep RPCDetIdRPCDigiMuonDigiCollection_*_*_*',\n 'keep recoMuons_muonsNoRPC_*_*',\n 'keep L1MuRegionalCands_*_RPCb_*',\n 'keep 
L1MuRegionalCands_*_RPCf_*',\n 'keep L1MuGMTCands_*_*_*',\n 'keep L1MuGMTReadoutCollection_*_*_*'),\n## outputModule.outputCommands = cms.untracked.vstring(\n## 'drop *',\n## 'keep edmTriggerResults_*_*_*',\n## 'keep *_ALCARECOMuAlStandAloneCosmics_*_*',\n## 'keep *_ALCARECOMuAlGlobalCosmics_*_*',\n## 'keep *_ALCARECOMuAlCalIsolatedMu_*_*',\n## 'keep *_cosmicMuons_*_*',\n## 'keep *_cosmictrackfinderP5_*_*',\n## 'keep Si*Cluster*_*_*_*',\n## 'keep *_muonCSCDigis_*_*',\n## 'keep *_muonDTDigis_*_*',\n## 'keep *_muonRPCDigis_*_*',\n## 'keep *_dt1DRecHits_*_*',\n## 'keep *_dt2DSegments_*_*',\n## 'keep *_dt4DSegments_*_*',\n## 'keep *_csc2DRecHits_*_*',\n## 'keep *_cscSegments_*_*',\n## 'keep *_rpcRecHits_*_*',\n## 'keep HOCalibVariabless_*_*_*',\n## 'keep *_ALCARECOTkAlCosmicsCTF_*_*',\n## 'keep *_ALCARECOTkAlCosmicsCosmicTF_*_*',\n## 'keep *_ALCARECOTkAlCosmicsRS_*_*',\n## 'keep *_ALCARECOTkAlCosmics*0T_*_*',\n## 'keep *_eventAuxiliaryHistoryProducer_*_*',\n## 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*',\n## 'keep *_MEtoEDMConverter_*_*',\n## 'keep CSCDetIdCSCWireDigiMuonDigiCollection_*_*_*',\n## 'keep CSCDetIdCSCStripDigiMuonDigiCollection_*_*_*',\n## 'keep DTLayerIdDTDigiMuonDigiCollection_*_*_*',\n## 'keep RPCDetIdRPCDigiMuonDigiCollection_*_*_*',\n## 'keep L1MuGMTCands_*_*_*',\n## 'keep L1MuGMTReadoutCollection_*_*_*')\n elif dataTier == 'FEVTHLTALL':\n installEventContent(self.process, \"FEVT\", outputModule)\n outputModule.outputCommands.append('keep *_*_*_HLT')\n else:\n installEventContent(self.process, dataTier, outputModule)\n\n if params['primaryDataset'] != None:\n outputModule.dataset.primaryDataset = cms.untracked.string(params['primaryDataset'])\n\n if params['selectEvents'] != None:\n outputModule.SelectEvents = cms.untracked.PSet(\n SelectEvents = cms.vstring()\n )\n for selCond in params['selectEvents']:\n outputModule.SelectEvents.SelectEvents.append(selCond)\n\n setattr(self.process, moduleName, outputModule)\n\n return",
"def test_set_node_name_for_matmul_add_linear(self, export_args):\n class Linear(torch.nn.Module):\n def __init__(self):\n super(Linear, self).__init__()\n self.linear = torch.nn.Linear(3, 2)\n\n def forward(self, inp):\n x = self.linear(inp)\n return x\n\n model = Linear()\n # Using an input to linear op with dimension != 2 causes torch to use matmul->add instead of gemm op\n onnx_path = './data/MyModel.onnx'\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n expected_node_names = ['linear', 'linear#1.end']\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n for name in expected_node_names:\n assert name in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n # Check that gemm still works as expected\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input=torch.randn(1, 3), onnx_export_args=copy.deepcopy(export_args))\n onnx_model = onnx.load(onnx_path)\n\n actual_node_names = [node.name for node in onnx_model.graph.node]\n assert 'linear' in actual_node_names\n assert 'linear#1' not in actual_node_names\n\n expected_param_names = ['linear.weight', 'linear.bias']\n _, valid_param_set = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n for name in expected_param_names:\n assert name in valid_param_set\n\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)",
"def exportAssetAssembly(name, rigTopNode, meshTopNode, path, postScript=None):\n if pm.ls(rigTopNode):\n rigTopNode = pm.PyNode(rigTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check your \"\n \"scene\".format(rigTopNode))\n return\n\n if pm.ls(meshTopNode):\n meshTopNode = pm.PyNode(meshTopNode)\n else:\n pm.displayError(\n \"{} doesn't exist or duplicated. Please check \"\n \"your scene\".format(meshTopNode))\n return\n # check the folder and script\n # if the target name exist abort and request another name\n\n deformer_jnts = rigTopNode.rigGroups[3].connections()[0].members()\n if not deformer_jnts:\n pm.displayError(\n \"{} is empty. The tool can't find any joint\".format(meshTopNode))\n\n # export connections and cut joint connections\n file_path = os.path.join(path, name + \".jmm\")\n dm_nodes = exportConnections(source=deformer_jnts,\n filePath=file_path,\n disc=True)\n\n # cut al possible remaining connection and adjust hierarchy\n # joint or visibility\n jnt_org = pm.PyNode(\"jnt_org\")\n pm.disconnectAttr(rigTopNode.jnt_vis, jnt_org.visibility)\n\n # restructure model\n model = pm.createNode(\"transform\",\n n=\"model\",\n p=None,\n ss=True)\n pm.addAttr(model, ln=\"rigGroups\", at='message', m=1)\n pm.parent(meshTopNode, jnt_org, model)\n\n # disconnect jnt set\n sets = rigTopNode.listConnections(type=\"objectSet\")\n\n deformersGrp = None\n for oSet in sets:\n if \"deformers_grp\" in oSet.name():\n deformersGrp = oSet\n\n if deformersGrp:\n for cnx in deformersGrp.message.listConnections(p=True):\n pm.disconnectAttr(deformersGrp.message, cnx)\n pm.connectAttr(deformersGrp.message, model.attr(\"rigGroups[0]\"))\n\n # disconnect bindPoses\n dg_poses = rigTopNode.message.listConnections(type=\"dagPose\", p=True)\n for dgp in dg_poses:\n if dgp.node().name().startswith(\"bindPose\"):\n pm.disconnectAttr(rigTopNode.message, dgp)\n\n # post script\n if postScript:\n try:\n exec(compile(open(postScript, \"rb\").read(), postScript, 'exec'))\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n pm.displayError(message)\n cont = pm.confirmBox(\"FAIL: Script Fail\",\n \"Do you want to export anyway?\" + \"\\n\\n\"\n + message + \"\\n\\n\" + traceback.format_exc(),\n \"Continue\", \"Cancel\")\n if not cont:\n pm.undo()\n return\n\n # export rig model\n pm.select(dm_nodes, r=True)\n pm.select(rigTopNode, add=True)\n file_path = os.path.join(path, name + \"_rig.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)\n\n # export mesh and joints\n pm.select(model, r=True)\n file_path = os.path.join(path, name + \"_model.ma\")\n exp = pm.exportSelected(file_path, f=True, type=\"mayaAscii\")\n pm.displayInfo(exp)",
"def export(model, args, filename=None, export_params=True,\n graph_name='Graph', save_text=False, opset_version=None):\n\n _check_available()\n\n chainer.config.train = False\n chainer.config.enable_backprop = True\n\n if opset_version is None:\n opset_version = int(onnx.defs.onnx_opset_version())\n elif opset_version < MINIMUM_OPSET_VERSION:\n warnings.warn(\n 'ONNX-Chainer has been tested only with opset_version >= {m}. '\n 'This is because ONNXRuntime supports only opset_version >= {m}. '\n 'The ONNX file exported with your requested opset_version ({o}) '\n 'may cause some problems because the converters used for the '\n 'opset_version have not been tested.'.format(\n m=MINIMUM_OPSET_VERSION,\n o=opset_version)\n )\n\n # Forward computation\n network_inputs = []\n if isinstance(args, tuple):\n args = list(args)\n if isinstance(args, list):\n for i, arg in enumerate(args):\n if isinstance(arg, chainer.get_array_types()):\n args[i] = chainer.Variable(arg)\n network_inputs.append(args[i])\n flat_args = args\n outputs = model(*args)\n elif isinstance(args, dict):\n for key, arg in args.items():\n if isinstance(arg, chainer.get_array_types()):\n args[key] = chainer.Variable(arg)\n network_inputs.append(args[key])\n flat_args = list(args.values())\n outputs = model(**args)\n elif isinstance(args, chainer.get_array_types()):\n args = chainer.Variable(args)\n network_inputs.append(args)\n flat_args = [args]\n outputs = model(args)\n elif isinstance(args, chainer.Variable):\n network_inputs.append(args)\n flat_args = [args]\n outputs = model(args)\n else:\n raise ValueError(\n 'The \\'args\\' argument should be a list, tuple, dict, '\n 'numpy array, or Chainer Variable. But a {} object was '\n 'given.'.format(type(args)))\n\n initializers = []\n input_tensors = []\n param_names = set()\n for param in model.params():\n param_names.add(str(id(param)))\n tensor = convert_parameter(param)\n initializers.append(tensor)\n input_tensors.append(helper.make_tensor_value_info(\n str(id(param)), tensor.data_type, tensor.dims))\n\n network_input_names = set()\n for i in network_inputs:\n network_input_names.add(str(id(i)))\n input_tensors.append(helper.make_tensor_value_info(\n str(id(i)), NP_TYPE_TO_TENSOR_TYPE[i.dtype], i.shape))\n\n with ONNXExport(opset_version) as o:\n if isinstance(outputs, (list, tuple)):\n flat_outputs = outputs\n elif isinstance(outputs, dict):\n flat_outputs = list(outputs.values())\n elif isinstance(outputs, chainer.Variable):\n flat_outputs = [outputs]\n else:\n raise RuntimeError(\n 'Unexpected output type from the model: {}'.format(\n type(outputs)))\n chainer.grad(flat_outputs, list(model.params()) + flat_args)\n\n implicit_input_names = set(o.inputs.keys()) - param_names -\\\n network_input_names\n for name in implicit_input_names:\n tensor = convert_parameter(o.inputs[name])\n initializers.append(tensor)\n input_tensors.append(helper.make_tensor_value_info(\n name, tensor.data_type, tensor.dims))\n\n # If additional parameters are created during conversion\n if o.additional_parameters:\n for param in o.additional_parameters:\n tensor = convert_parameter(param)\n initializers.append(tensor)\n input_tensors.append(helper.make_tensor_value_info(\n str(id(param)), tensor.data_type, tensor.dims))\n\n # The graph must be topologically sorted\n graph = reversed(o.graph)\n\n # Convert output tensors\n output_tensors = []\n if isinstance(outputs, dict):\n outputs = list(outputs.values())\n if not isinstance(outputs, (list, tuple)):\n outputs = (outputs,)\n\n for output in 
outputs:\n output_id = str(id(output))\n if output_id in o.renamed_outputs:\n output_id = o.renamed_outputs[output_id]\n output_tensors.append(helper.make_tensor_value_info(\n output_id, NP_TYPE_TO_TENSOR_TYPE[output.dtype], output.shape))\n\n if not export_params:\n initializers = []\n\n onnx_graph = helper.make_graph(\n graph, graph_name, input_tensors, output_tensors,\n initializer=initializers)\n\n model = helper.make_model(\n onnx_graph,\n producer_name='Chainer',\n producer_version=chainer.__version__,\n opset_imports=[helper.make_opsetid('', opset_version)]\n )\n\n model.ir_version = onnx.IR_VERSION\n\n rename_tensors(model)\n checker.check_model(model)\n\n if filename is not None and isinstance(filename, str):\n with open(filename, 'wb') as fp:\n fp.write(model.SerializeToString())\n if save_text:\n with open(filename + '.txt', 'w') as fp:\n print(model, file=fp)\n elif hasattr(filename, 'write'):\n filename.write(model.SerializeToString())\n\n return model",
"def exec_module(self, module):\n\n if not self.filename.endswith(config.FILE_EXT) and not self.filename.endswith(\n \"__init__.py\"\n ):\n print(\"Fatal error: ExtensionLoader is asked to load a normal file.\")\n print(\"filename:\", self.filename)\n print(\"Expected extension:\", config.FILE_EXT)\n raise SystemExit\n\n name = module.__name__\n if module.__name__ == config.MAIN_MODULE_NAME:\n module.__name__ = \"__main__\"\n config.MAIN_MODULE_NAME = None\n\n with open(self.filename) as f:\n source = f.read()\n\n transforms.identify_requested_transformers(source)\n\n if config.TRANSFORMERS:\n original = source\n source = transforms.add_all_imports(source)\n source = transforms.apply_source_transformations(source)\n\n if config.DIFF and original != source:\n self.write_html_diff(name, original, source)\n\n if config.CONVERT and self.filename.endswith(config.FILE_EXT):\n print(\"############### Original source: ############\\n\")\n print(original)\n print(\"\\n############### Converted source: ############\\n\")\n print(source)\n print(\"=\" * 50, \"\\n\")\n\n source = transforms.apply_ast_transformations(source)\n exec(source, vars(module))",
"def mutate(self, module, operators, output):\n _, mutant_asts = self.generate_mutant_asts(module, operators)\n assert self.original_ast is not None\n\n mutated_modules = []\n mut_num = 0\n with timeblock('Time for generating modules'):\n for (mutant_ast, operator) in mutant_asts:\n module_name = \"mutant_\" + operator[1].name()+'_'+operator[0].__name__+'_'+ str(mut_num)\n mut_num += 1\n mutated_module = self.generate_mutant_module(mutant_ast, module_name)\n mutated_modules.append((mutated_module, mutant_ast, operator))\n\n if output:\n MuUtilities.output(self.original_ast, mutant_ast, module_name)\n return mutated_modules",
"def _module_expansion_symbolic_trace(\n root: Union[torch.nn.Module, Callable[..., Any]],\n concrete_args: Optional[Dict[str, Any]] = None,\n) -> torch.fx.GraphModule:\n # For functions doesn't support symbolic tracing, create wrappers\n # which produce symbolic results during tracing.\n patched_torch_methods = {\n target_name: _wrap_for_symbolic_trace(getattr(torch, target_name))\n for target_name in _TORCH_METHODS_TO_PATCH\n }\n\n # Set the symbolic-tracing friendly functions so that `tracer.trace` below\n # can work.\n for name, (wrapper, _) in patched_torch_methods.items():\n setattr(torch, name, wrapper)\n\n try:\n # Set up a tracer.\n tracer = ModuleExpansionTracer()\n # Trace the model.\n graph = tracer.trace(root, concrete_args)\n name = (\n root.__class__.__name__\n if isinstance(root, torch.nn.Module)\n else root.__name__\n )\n return torch.fx.GraphModule(tracer.root, graph, name)\n finally:\n # Revert the patches for symbolic tracing.\n for name, (_, wrapped) in patched_torch_methods.items():\n # wrapped is the original version of `torch.name`.\n setattr(torch, name, wrapped)",
"def create_module(sbml_model_file, model_name, model_output_dir, condition_df,\n observable_df):\n\n from amici.petab_import import import_model\n import_model(sbml_model=sbml_model_file, observable_table=observable_df,\n model_name=model_name, model_output_dir=model_output_dir,\n verbose=True, condition_table=condition_df)",
"def _register_module_returns(self):\n # Use nearest parent module algorithm\n for nd_struct in self._global_context.node_struct_collections.values():\n if not nd_struct.successor_nodes_names_external:\n # check if any edge to graph output\n has_graph_output = False\n for edge in nd_struct.fragment.metadata.get('outputs'):\n if edge in self._global_context.onnx_graph_info.get('graph_outputs'):\n has_graph_output = True\n break\n if not has_graph_output:\n continue # avoid unnecessary checking\n for base_out in nd_struct.outputs_manager.outputs:\n if base_out.onnx_edge_name in self._global_context.onnx_graph_info.get('graph_outputs'):\n MatcherHelper.register_outputs_to_main_model(base_out.onnx_edge_name, nd_struct)\n continue\n out_user_onnx_names = base_out.onnx_user\n for out_user_onnx_name in out_user_onnx_names:\n out_user_struct = \\\n self._global_context.onnx_node_name_to_node_struct_map.get(out_user_onnx_name)\n if out_user_struct is None:\n raise ValueError(f\"The Matcher detected an output has unknown provider for the edge \"\\\n f\"{base_out.onnx_edge_name}\")\n public_parent = MatcherHelper.get_public_parent_module(nd_struct, out_user_struct)\n nd_parent = nd_struct.parent_module_struct\n # Recursively register outputs to parents until the public module\n while public_parent.identifier != nd_parent.identifier:\n nd_parent.add_outputs_edge(base_out.onnx_edge_name)\n nd_parent = nd_parent.parent_module_struct",
"def __call__(self, node):\n\n # should throw an error\n if node.cfgInterface == None:\n return\n\n # //\n # // Extract LFN base from included WorkflowSpec parameters\n #//\n base = node.getParameter(\"UnmergedLFNBase\")[0]\n\n # //\n # // iterate over outputmodules/data tiers\n #// Generate LFN, PFN and Catalog for each module\n for modName, outModule in node.cfgInterface.outputModules.items():\n if ( not outModule.has_key('fileName') ):\n msg = \"OutputModule %s does not contain a fileName entry\" % modName\n raise RuntimeError, msg\n outModule['logicalFileName'] = os.path.join(base, outModule['dataTier'], str(self.lfnGroup))\n outModule['logicalFileName'] += '/'\n outModule['logicalFileName'] += outModule['fileName']\n\n return",
"def add(self, module, *from_nodes, **connections):\n assert module.name not in self.modules, \"There is already a module named %s\" % module.name\n self.modules[module.name] = comp\n\n if connections:\n for innode, outnode in connections.iteritems():\n self.connect(outnode, tuple(module.name, innode))\n elif from_nodes:\n for outnode, innode in zip(from_nodes, module.input_nodes()):\n self.connect(outnode, innode)\n\n return self",
"def mutate_ncp(parent, child_cp, parameters):\r\n\r\n if not parameters['one_child_keep_child']:\r\n child_nn = NeuralNetworkBuilder.clone_neural_network(parent.ncp.nn)\r\n else:\r\n child_nn = parent.ncp.nn\r\n\r\n if parameters['ncp_fully_connect_mutation'] or parameters['ncp_only_mutation_nodes']:\r\n child_nn.mutation_input_layer = []\r\n\r\n added_input_layer_X = None\r\n\r\n if not parameters['only_ncp']:\r\n # neuron_id = len(child_nn.input_layer)\r\n\r\n added_input_layer_X = concatenate(child_cp.conv_network.output_layer.mutation_semantics, axis=1)\r\n\r\n if parameters['recompute']:\r\n del child_cp.conv_network.output_layer.mutation_semantics\r\n for node, tensor in zip(child_cp.conv_network.output_layer.mutation_nodes, child_cp.conv_network.output_layer.mutation_tensors):\r\n del node\r\n del tensor\r\n del child_cp.conv_network.output_layer.mutation_nodes\r\n del child_cp.conv_network.output_layer.mutation_tensors\r\n\r\n # for node in child_cp.conv_network.output_layer.mutation_nodes:\r\n #\r\n # if added_input_layer_X is None:\r\n # added_input_layer_X = node.semantics\r\n # else:\r\n # added_input_layer_X = concatenate((added_input_layer_X, node.semantics), axis=1)\r\n #\r\n # for _ in node.semantics.T:\r\n # new_input_neuron = InputNeuron(neuron_id, None)\r\n # #===============================================================\r\n # # new_input_neuron = InputNeuron(neuron_id, input_data)\r\n # #===============================================================\r\n # neuron_id += 1\r\n # if parameters['ncp_fully_connect_mutation'] or parameters['ncp_only_mutation_nodes']:\r\n # child_nn.mutation_input_layer.append(new_input_neuron)\r\n # child_nn.input_layer.append(new_input_neuron)\r\n # if parameters['recompute']:\r\n # del node.semantics\r\n\r\n if parameters['only_ncp']:\r\n added_test_input_layer_X = None\r\n original_test_X = parameters['X_test']\r\n channels = original_test_X.shape[3]\r\n for i in range(channels):\r\n X = original_test_X[:, :, :, i]\r\n X = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))\r\n if added_test_input_layer_X is None:\r\n added_test_input_layer_X = X\r\n else:\r\n added_test_input_layer_X = concatenate((added_test_input_layer_X, X), axis=1)\r\n else:\r\n added_test_input_layer_X = concatenate(child_cp.conv_network.output_layer.mutation_test_semantics, axis=1)\r\n\r\n if parameters['recompute'] and not parameters['only_ncp']:\r\n del child_cp.conv_network.output_layer.mutation_test_semantics\r\n\r\n\r\n random_state = parameters['random_state']\r\n learning_step = 'optimized'\r\n sparseness = { 'sparse': parameters['ncp_sparseness'],\r\n 'minimum_sparseness': parameters['ncp_min_sparseness'],\r\n 'maximum_sparseness': parameters['ncp_max_sparseness'],\r\n 'fully_connect_mutation_nodes' : parameters['ncp_fully_connect_mutation'],\r\n 'only_mutation_nodes' : parameters['ncp_only_mutation_nodes'],\r\n 'min_output_sparseness' : parameters['min_output_sparseness'],\r\n 'max_output_sparseness' : parameters['max_output_sparseness'],\r\n 'prob_skip_connection': 0}\r\n\r\n #===========================================================================\r\n # sparseness = { 'sparse': False, 'minimum_sparseness': 0, 'maximum_sparseness': 1, 'prob_skip_connection': 0}\r\n #===========================================================================\r\n\r\n maximum_new_neurons_per_layer = parameters['ncp_max_mutation_nodes']\r\n minimum_new_neurons_per_layer = parameters['ncp_min_mutation_nodes']\r\n\r\n maximum_bias_weight = 
parameters['ncp_max_bias_weight']\r\n maximum_neuron_connection_weight = parameters['ncp_max_connection_weight']\r\n\r\n X = None\r\n #X = parameters['X']\r\n y = parameters['y']\r\n global_preds = parent.get_predictions()\r\n delta_target = y - global_preds\r\n hidden_activation_functions_ids = [parameters['ncp_activation']]\r\n prob_activation_hidden_layers = 1\r\n child_nn, times = mutate_hidden_layers(added_input_layer_X, added_test_input_layer_X, X, y, child_nn, random_state, learning_step, sparseness, maximum_new_neurons_per_layer, maximum_neuron_connection_weight, maximum_bias_weight, delta_target, global_preds, hidden_activation_functions_ids, prob_activation_hidden_layers, params=parameters, minimum_new_neurons_per_layer=minimum_new_neurons_per_layer)\r\n\r\n if parameters['ncp_clear_semantics']:\r\n child_nn.clear_hidden_semantics()\r\n if parameters['ncp_only_mutation_nodes'] and not parameters['only_ncp']:\r\n # [input_neuron.clear_semantics() for input_neuron in child_nn.input_layer]\r\n [input_neuron.clear_semantics() for input_neuron in child_nn.mutation_input_layer]\r\n\r\n return NonConvolutionalPart(child_nn), times",
"def addExportLayerToCoreml(builder):\n outputNames = [output.name for output in builder.spec.description.output]\n\n for i, outputName in enumerate(outputNames):\n # formulas: https://github.com/ultralytics/yolov5/issues/471\n builder.add_activation(\n name=f\"sigmoid_{outputName}\",\n non_linearity=\"SIGMOID\",\n input_name=outputName,\n output_name=f\"{outputName}_sigmoid\",\n )\n\n ### Coordinates calculation ###\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2) -> nC = 640 / strides[i]\n builder.add_slice(\n name=f\"slice_coordinates_xy_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_xy\",\n axis=\"width\",\n start_index=0,\n end_index=2,\n )\n # x,y * 2\n builder.add_elementwise(\n name=f\"multiply_xy_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_xy\"],\n output_name=f\"{outputName}_multiplied_xy_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # x,y * 2 - 0.5\n builder.add_elementwise(\n name=f\"subtract_0_5_from_xy_{outputName}\",\n input_names=[f\"{outputName}_multiplied_xy_by_two\"],\n output_name=f\"{outputName}_subtracted_0_5_from_xy\",\n mode=\"ADD\",\n alpha=-0.5,\n )\n grid = make_grid(featureMapDimensions[i], featureMapDimensions[i]).numpy()\n # x,y * 2 - 0.5 + grid[i]\n builder.add_bias(\n name=f\"add_grid_from_xy_{outputName}\",\n input_name=f\"{outputName}_subtracted_0_5_from_xy\",\n output_name=f\"{outputName}_added_grid_xy\",\n b=grid,\n shape_bias=grid.shape,\n )\n # (x,y * 2 - 0.5 + grid[i]) * stride[i]\n builder.add_elementwise(\n name=f\"multiply_xy_by_stride_{outputName}\",\n input_names=[f\"{outputName}_added_grid_xy\"],\n output_name=f\"{outputName}_calculated_xy\",\n mode=\"MULTIPLY\",\n alpha=strides[i],\n )\n\n # input (1, 3, nC, nC, 85), output (1, 3, nC, nC, 2)\n builder.add_slice(\n name=f\"slice_coordinates_wh_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_sliced_coordinates_wh\",\n axis=\"width\",\n start_index=2,\n end_index=4,\n )\n # w,h * 2\n builder.add_elementwise(\n name=f\"multiply_wh_by_two_{outputName}\",\n input_names=[f\"{outputName}_sliced_coordinates_wh\"],\n output_name=f\"{outputName}_multiplied_wh_by_two\",\n mode=\"MULTIPLY\",\n alpha=2,\n )\n # (w,h * 2) ** 2\n builder.add_unary(\n name=f\"power_wh_{outputName}\",\n input_name=f\"{outputName}_multiplied_wh_by_two\",\n output_name=f\"{outputName}_power_wh\",\n mode=\"power\",\n alpha=2,\n )\n # (w,h * 2) ** 2 * anchor_grid[i]\n anchor = (\n anchorGrid[i]\n .expand(-1, featureMapDimensions[i], featureMapDimensions[i], -1)\n .numpy()\n )\n builder.add_load_constant_nd(\n name=f\"anchors_{outputName}\",\n output_name=f\"{outputName}_anchors\",\n constant_value=anchor,\n shape=anchor.shape,\n )\n builder.add_elementwise(\n name=f\"multiply_wh_with_achors_{outputName}\",\n input_names=[f\"{outputName}_power_wh\", f\"{outputName}_anchors\"],\n output_name=f\"{outputName}_calculated_wh\",\n mode=\"MULTIPLY\",\n )\n\n builder.add_concat_nd(\n name=f\"concat_coordinates_{outputName}\",\n input_names=[f\"{outputName}_calculated_xy\", f\"{outputName}_calculated_wh\"],\n output_name=f\"{outputName}_raw_coordinates\",\n axis=-1,\n )\n builder.add_scale(\n name=f\"normalize_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_coordinates\",\n output_name=f\"{outputName}_raw_normalized_coordinates\",\n W=torch.tensor([1 / 640]).numpy(),\n b=0,\n has_bias=False,\n )\n\n ### Confidence calculation ###\n builder.add_slice(\n 
name=f\"slice_object_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_object_confidence\",\n axis=\"width\",\n start_index=4,\n end_index=5,\n )\n builder.add_slice(\n name=f\"slice_label_confidence_{outputName}\",\n input_name=f\"{outputName}_sigmoid\",\n output_name=f\"{outputName}_label_confidence\",\n axis=\"width\",\n start_index=5,\n end_index=0,\n )\n # confidence = object_confidence * label_confidence\n builder.add_multiply_broadcastable(\n name=f\"multiply_object_label_confidence_{outputName}\",\n input_names=[\n f\"{outputName}_label_confidence\",\n f\"{outputName}_object_confidence\",\n ],\n output_name=f\"{outputName}_raw_confidence\",\n )\n\n # input: (1, 3, nC, nC, 85), output: (3 * nc^2, 85)\n builder.add_flatten_to_2d(\n name=f\"flatten_confidence_{outputName}\",\n input_name=f\"{outputName}_raw_confidence\",\n output_name=f\"{outputName}_flatten_raw_confidence\",\n axis=-1,\n )\n builder.add_flatten_to_2d(\n name=f\"flatten_coordinates_{outputName}\",\n input_name=f\"{outputName}_raw_normalized_coordinates\",\n output_name=f\"{outputName}_flatten_raw_coordinates\",\n axis=-1,\n )\n\n builder.add_concat_nd(\n name=\"concat_confidence\",\n input_names=[\n f\"{outputName}_flatten_raw_confidence\" for outputName in outputNames\n ],\n output_name=\"raw_confidence\",\n axis=-2,\n )\n builder.add_concat_nd(\n name=\"concat_coordinates\",\n input_names=[\n f\"{outputName}_flatten_raw_coordinates\" for outputName in outputNames\n ],\n output_name=\"raw_coordinates\",\n axis=-2,\n )\n\n builder.set_output(\n output_names=[\"raw_confidence\", \"raw_coordinates\"],\n output_dims=[(25200, numberOfClassLabels), (25200, 4)],\n )",
"def export(function, toModule, asName=None, log=True):\n\tmod = lookupModule(toModule, False)\n\tif asName is None:\n\t\tasName = function.__name__\n\tif log:\n\t\tif asName != function.__name__:\n\t\t\tdebug(\"BugUtil - exporting %s.%s as %s.%s\", function.__module__, function.__name__, toModule, asName)\n\t\telse:\n\t\t\tdebug(\"BugUtil - exporting %s.%s to %s\", function.__module__, asName, toModule)\n\tsetattr(mod, asName, function)"
] | [
"0.6208346",
"0.57180774",
"0.5517651",
"0.5326858",
"0.53166723",
"0.51689684",
"0.5160342",
"0.49694705",
"0.49519387",
"0.49379078",
"0.4789286",
"0.47887295",
"0.47632617",
"0.47464138",
"0.47271678",
"0.4721467",
"0.46698704",
"0.46584198",
"0.46493828",
"0.46453112",
"0.4614797",
"0.46011654",
"0.45894042",
"0.457264",
"0.4539643",
"0.45387027",
"0.45279917",
"0.45262757",
"0.4526244",
"0.45081034"
] | 0.6967324 | 0 |
Restore a queue from a message reader. This publishes to the queue any messages returned by the reader. Messages already in the queue are left in place. | def restore(self, reader):
while True:
msg = reader.read()
if msg is None:
break
self.publish(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recover(self):\n if self._message_storage:\n for neighbor in self.neighbors:\n self.channel.queue_declare(queue=str(self.id) + str(neighbor))\n for message in self._message_storage:\n self.channel.basic_publish(\n exchange=\"\",\n routing_key=str(self.id) + str(neighbor),\n body=message,\n )\n\n for neighbor in self.neighbors:\n for _, _, body in self.channel.consume(\n queue=str(neighbor) + str(self.id), auto_ack=True, inactivity_timeout=5\n ):\n if body is not None:\n message = body.decode(\"utf-8\")\n if message != \"marker\":\n self.states.append(message)\n else:\n self.channel.cancel()",
"def reader(handle, input_queue):\n input_queue.put(handle.read())",
"def dequeue_content(queue):\n if queue.front is None:\n raise IndexError\n queue.front = queue.front.rest\n if queue.front is None:\n queue.back = None",
"def remove_queue(self, queue) -> None:\r\n self.receive_queues.remove(queue)",
"def reset_queue(self, name, namespace):\n raise RuntimeError('Archives are read-only')",
"def flushMsgs(self):\n\n self.queue = self.pre_queue[:]\n self.pre_queue = []",
"def restore(self, restore):\n self._restore = restore",
"def pop_queue(self):\n response = self.sqs_client.receive_message(\n QueueUrl=self.sqs_queue, WaitTimeSeconds=20)\n if response:\n messages = response.get(\"Messages\")\n if messages:\n # we only receive one message at a time\n handle = messages[0].get(\"ReceiptHandle\")\n body = messages[0].get(\"Body\")\n return handle, body\n return None, None",
"def restore(self):\r\n token, stream, line, col = self.pushes.pop()\r\n self.token = token\r\n self.stream = stream\r\n self.line = line\r\n self.column = col",
"def pop_message(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT * FROM message_queue WHERE tstamp==(:first)\", {\"first\":self.mq_first}) \n item = app_process_cursor.fetchone()\n if item == None:\n return\n app_process_cursor.execute(\"DELETE FROM message_queue WHERE tstamp==(:first)\", {\"first\":self.mq_first})\n self.mq_first = item[4] #now sets first to next item pointed to\n app_process.commit()\n app_process.close()\n return item",
"def drop_message(self):\n heapq.heappop(self._message_queue)",
"def dequeue_message(self) -> MessageQueueItem:\n return heapq.heappop(self._message_queue)",
"def dequeue(queue):\n item = front(queue)\n queue.front = queue.front.next\n if empty_queue(queue):\n queue.back = None\n\n queue.size = queue.size - 1\n\n return item",
"def queue_to_stack(queue):\n stack = Stack()\n check_list = []\n\n while len(queue) != 0:\n check_list.append(queue.dequeue())\n\n check_list.reverse()\n\n while check_list != []:\n stack.push(check_list[0])\n check_list.remove(check_list[0])",
"def restore_messages(self, org, messages):\n pass",
"def clear_queue(self):\n\t\t\tself.message_queue.clear()\n\t\t\treturn self.message_queue",
"def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)",
"def test_dequeue(self):\n dest = '/queue/foo'\n frame = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='some data') \n self.store.enqueue(dest, frame)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 1\n \n rframe = self.store.dequeue(dest)\n assert frame == rframe\n \n # We cannot generically assert whether or not frame and rframe are\n # the *same* object. \n \n assert self.store.has_frames(dest) == False\n assert self.store.size(dest) == 0",
"def pop_queue(self, queue=None):\n if not queue:\n return False\n \n cur = self.conn.cursor()\n cur.execute(\"LOCK TABLE \" + queue + \" IN ACCESS EXCLUSIVE MODE;\")\n\n cur.execute(\"SELECT id FROM \" + queue + \" LIMIT 1;\")\n row = cur.fetchone()\n self.conn.commit()\n \n if row:\n cur.execute(\"DELETE FROM \" + queue + \" WHERE id='\"+str(row[0])+\"';\")\n return row[0]\n else:\n return False",
"def _dequeue(self):\n return self._queue.popleft()",
"def replace_recv_queue(self, new_queue):\n self.recv_queue_lock.acquire()\n while not self.recv_queue.empty():\n new_queue.put(self.recv_queue.get(True), True)\n self.recv_queue = new_queue\n self.recv_queue_lock.release()",
"def pop_messages(self):\n msge = self.received_messages\n self.received_messages = []\n return msge",
"def pop_message(self, queue_name):\r\n messages = self.pop_messages(queue_name, count=1)\r\n if messages['item_count'] > 0:\r\n return messages['items'][0]\r\n else:\r\n return None",
"def dequeue(self, irc, msg, args):\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\")\n return\n self._queue.pop(pos)\n self._count -= 1\n self._dump_queue()\n irc.reply(\"Removed you from the queue as requested\")",
"def move_recent_srobbles_artist_down_queue():\n conn = psycopg2.connect(\"dbname=artistqdb host=localhost user=postgres\")\n cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n # Find artists in queue who were played yesterday\n # insert that artist with that most recent date in the queue\n cur.execute(\"\"\"insert into artist_queue (artist, last_scrobble_date)\n select s.artist,\n max(s.scrobble_date)\n from scrobbles s\n join artist_queue q on s.artist = q.artist\n where s.scrobble_date>now()-interval '2' day\n group by s.artist\"\"\")\n\n # Delete any older duplicates\n cur.execute(\"\"\"delete from artist_queue as l\n using artist_queue as r\n where l.artist = r.artist\n and l.id < r.id\"\"\")\n\n # Make the changes persistent in the database and end communications\n conn.commit()\n cur.close()\n conn.close()",
"def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []",
"def original(self):\n return ReadMessage(self._original_data)",
"def put_message(cls, message):\n rp = cls.get()\n rp.queue_receive.put(message)",
"def pull(self, timeout=60, factory=SQSItem):\n self._connect()\n message = self.queue.read(timeout)\n if message is not None:\n body = message.get_body()\n data = json.loads(body)\n item = factory(data)\n\n # Mark for futher deletion, but do not refer to it from ouselves (for garbage collectors).\n if item is not None:\n item.__dict__['_queue_'] = self.queue\n item.__dict__['_message_'] = message\n\n return item\n else:\n return None",
"def dequeue(self):\n if not self.outbound_stack:\n while self.inbound_stack:\n self.outbound_stack.append(self.inbound_stack.pop())\n return self.outbound_stack.pop()"
] | [
"0.5806696",
"0.563494",
"0.56112945",
"0.5548665",
"0.54663706",
"0.54382193",
"0.536221",
"0.5321514",
"0.53204316",
"0.5261433",
"0.5235016",
"0.5229662",
"0.5212791",
"0.5189782",
"0.51616454",
"0.5141593",
"0.5140806",
"0.51346123",
"0.5111369",
"0.50946546",
"0.50933266",
"0.50639546",
"0.50520205",
"0.5041746",
"0.49915555",
"0.49813965",
"0.49735758",
"0.4969012",
"0.4960812",
"0.4957322"
] | 0.7559581 | 0 |
pull alarms from the queue, if any are available | def pull_alarm(self):
self.job = MATCH_QUEUE.take(timeout=settings.QUEUE_WAIT_TIMEOUT)
if not self.job:
raise lock.PassEmpty
# the job body is JSON-formatted; deserialize it
try:
self.alarm_list = map(json.loads, self.job.body.strip().splitlines())
except Exception as error:
logger.warning(
'match alarm pull error:%s, %s, please check job is json serialized',
error,
self.job.body) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alarm(self, interval, call):",
"async def alarm(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n self.__read_verbose_param(context)\n chat_id = update.effective_message.chat_id\n job_removed = remove_job_if_exists(str(chat_id), context)\n due = 1.0\n context.job_queue.run_repeating(\n self.__send_alarm, due, chat_id=chat_id, name=str(chat_id), data=due\n )\n\n self.__arm()\n msg = \"Alarm set! ✅\"\n if job_removed:\n msg += \" Old one was removed.\"\n await update.message.reply_text(msg)",
"def alarm(bot, job):\n chat_id = job.context[0]\n try:\n timer_name = job.context[1]\n bot.send_message(chat_id, text=f'Beep: {timer_name}!')\n except IndexError:\n bot.send_message(chat_id, text='Beep!')",
"def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')",
"def get_alarm_by_tag(self, tag):\n\n alarm = self._alarm_manager.get_alarm_by_tag(tag)\n\n return alarm",
"def get_alarm(self, name):\n\n alarm = self._alarm_manager.get_alarm(name)\n\n return alarm",
"def alarm_message_received(msg):\n data = IndexIntData.from_msg(msg)\n if data is None:\n _LOGGER.error(\"Undable to parse MQTT message\")\n\n if data.value == 1:\n self._alarm.add(data.index)\n else:\n self._alarm.discard(data.index)\n\n _LOGGER.debug(\"Alarm: %s\", self._alarm)\n self.async_write_ha_state()",
"def alarmoff() :\n s.alarm(False, \"\")",
"def alarmon(alarmName=\"\") :\n s.alarm(True, alarmName)",
"def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)",
"def alarm(bot, job):\n message = MESSAGES[job.context]\n if len(message) <= 0:\n message = \"Alert set for right now\"\n bot.sendMessage(job.context, text=message)",
"def is_alarm():\n return _alarm",
"def fetch_job_from_queue(self):\n while 1:\n time.sleep(2)\n try:\n credentials = pika.PlainCredentials('USER', 'PASSWORD')\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbitmq', 5672, '/', credentials))\n channel = connection.channel()\n channel.queue_declare(queue='purserq')\n method_frame, header_frame, body = channel.basic_get(queue='purserq')\n if method_frame.NAME == 'Basic.GetEmpty':\n connection.close()\n else:\n channel.basic_ack(delivery_tag=method_frame.delivery_tag)\n print \"Received job:\", body, \"starting job to reply\"\n connection.close()\n self.reply_to_master(body)\n except AttributeError:\n print \"No content\"\n connection.close()\n except pika.exceptions.ConnectionClosed:\n print \"You get Connection Closed\"\n continue",
"def consumeMsg():\n\tosuser = 'osdev'\n\tospass = 'osdev'\n\toshost = '10.32.29.94'\n\tosport = '5672'\n\tosvhost = '/openstack'\n\tneutronExchange = Exchange('quantum', type='topic', durable=False)\n\tinfoQueue = Queue('exthook', exchange=neutronExchange , durable=False,\n\t\t\trouting_key='notifications.info')\n\twith Connection(\"\".join(['amqp://', osuser, ':', ospass, '@', \n\t\toshost, ':',osport, '/', osvhost])) as conn:\n\t\twith conn.Consumer(infoQueue, callbacks=[msgParse]):\n\t\t\twhile True:\n\t\t\t\ttry: \n\t\t\t\t\tconn.drain_events()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogging.exception('Draining events from AMQP stop')\n\t\t\t\t\tbreak",
"def add_alarm(self, reference, message, timestamp=None):\n alarm = Alarm.Alarm(reference, message, timestamp)\n outbound_message = self.outbound_message_factory.make_from_alarm(alarm)\n self.outbound_message_queue.put(outbound_message)",
"def alarms():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"alarm\", \"list\")\n else:\n cmd = _traffic_line(\"--alarms\")\n\n return _subprocess(cmd)",
"async def consume_items_from_rabbitmq(queue):\n ctr = 0\n start = time.time()\n while True:\n await asyncio.sleep(0.001)\n for method_frame, properties, body in channel.consume(queue_name, inactivity_timeout=1):\n if method_frame:\n # print(body)\n while queue.full():\n await asyncio.sleep(0.001)\n # await queue.put(body)\n queue.put_nowait(body)\n # Acknowledge the message\n channel.basic_ack(method_frame.delivery_tag)\n ctr += 1\n if not ctr % 1000:\n end = time.time() - start\n print(f'elapsed time: {end:.3f}\\tmessages received: {ctr}')\n else:\n # empty remaining items from queue\n while queue.qsize():\n await asyncio.sleep(0.001)\n end = time.time() - start\n print(f'elapsed time: {end:.3f}\\tmessages received: {ctr}')\n break\n await asyncio.sleep(0.001)\n\n requeued_messages = channel.cancel()",
"def periodicCall(self):\r\n if self.queue.qsize() != 0:\r\n self.action = self.queue.get()\r\n print(self.action)\r\n \r\n if not self.running:\r\n # This is the brutal stop of the system. You may want to do\r\n # some cleanup before actually shutting it down.\r\n import sys\r\n sys.exit(1)\r\n self.master.after(100, self.periodicCall)",
"def queueStatusAll():",
"def event_queue_proc(self,event):\r\n event()",
"def alarm_message(bot, user_id):\n\n def ring_alarm():\n\n message = bot_collection[user_id].get_alarm_message()\n alarm_count = bot_collection[user_id].alarm_count\n\n for i in range(alarm_count):\n bot_message = bot.send_message(text=message, chat_id=user_id)\n time.sleep(1)\n message_id = bot_message.message_id\n bot.delete_message(chat_id=user_id, message_id=message_id)\n\n return ring_alarm",
"def q_get(self):\n while not self.stopped():\n try:\n return self.in_q.get(timeout=self.heart_beat)\n except queue.Empty:\n pass",
"def test_pull_and_ack(self):\n self.assertEqual(self.post(['foo', 'bar']), [1, 2])\n messages=self.pull()\n self.assertDictContainsSubset({'id':1,'message':'foo'}, messages[0])\n data = {\n 'receiver': 'receiver1',\n 'queues': {\n 'queue1': {}\n },\n 'results': {\n 'queue1': {\n '1':'received',\n }\n }\n }\n r=s.post('/pull', json=data)\n self.assertEqual(r.status_code, 200)\n messages = r.json()['messages']['queue1']\n self.assertLessEqual(len(messages), 1)\n self.assertDictContainsSubset({'id':2,'message':'bar'}, messages[0])\n\n self.assertInDatabase(\n 'queue1_rst',\n {\n 'm_id':1,\n 'receiver':'receiver1',\n 'result':'received'\n }\n )\n self.assertInDatabase(\n 'queue1_rst',\n {\n 'm_id':2,\n 'receiver':'receiver1',\n 'status':'processing',\n }\n )",
"def msgpull():\n async def unknown():\n async for msg in _privateapi: # required to consume messages...\n print(f\"Unknown message: {msg}\") # TODO : probaby onto some error log somehow...\n\n if msgpull_task is None:\n asyncio.get_running_loop().create_task(unknown())",
"def _queue(self, timeout=None):\n while True:\n # Retry self._kq_control if the system call was interrupted\n try:\n events = self._kq.control(None, 16, timeout)\n break\n except OSError, e:\n if e.errno == errno.EINTR:\n continue\n raise\n for ev in events:\n event = (ev.ident, ev.filter)\n if event in self._kq_events:\n if (ev.filter == select.KQ_FILTER_PROC and\n ev.fflags == select.KQ_NOTE_EXIT):\n self._kq_events.pop(event).emit()\n else:\n self._kq_events[event].emit()",
"def alarm(self, alarm):\n\n self._alarm = alarm",
"def __on_alarm(\n self, event_name: str, data: dict, kwargs: dict) -> None:\n self.run_in(self.__delayed_announcement, 40)",
"def _isalarm(self):\n return self.dp.state()==PyTango.DevState.ALARM",
"def check_alert(self, event):\n \n # Get board logger\n board_logger = self.get_board_logger()\n\n # Loop for an hour and continue to alert every ten minutes \n current_time = datetime.now()\n end_time = current_time + timedelta(0, 60)\n # end_time = current_time + timedelta(hours=1)\n\n alarm_counter = 0\n while current_time < end_time:\n # Sleep for 10 minutes\n sleep(10);\n #sleep(600);\n\n # Prevent race condition between Board input_status and check_alert \n if GPIO.input(self.__pin) == 1:\n\n # Log alarm cycle\n alarm_counter += 1\n board_logger.info(\"Alarm Cycle #%s: Initiating event \" \n + \"alert.\", str(alarm_counter))\n\n # Call Event object's alert method\n event.alert(self.__ip, board_logger)\n\n # Get current time\n current_time = datetime.now()\n \n else:\n # Input status is 0 indicating recovery; Break out of loop and \n # return to main thread \n board_logger.info(\"Alarm state recovery.\") \n break\n \n # End of alert cycle; Return to main thread\n status = \"ALARM\" if self.get_input_status() else \"RECOVERY\"\n board_logger.info(\"End check alarm cycle. Current pin input \"\n + \"status is %s.\", status)",
"def pop():\n task = connection.zrange(QUEUE_KEY, 0, 0)\n if not task:\n return False, 'No emails now!'\n msg_id = task[0]\n timestamp = connection.zscore(QUEUE_KEY, msg_id)\n now = datetime.datetime.now().timestamp()\n if timestamp < now or abs(timestamp - now) <= 1e-6:\n message = connection.get(msg_id)\n pipeline = connection.pipeline()\n pipeline.zrem(QUEUE_KEY, msg_id)\n pipeline.delete(msg_id)\n pipeline.execute()\n return True, message\n return False, \"It's too early now!\""
] | [
"0.62227225",
"0.590374",
"0.58129394",
"0.5800809",
"0.5796147",
"0.57914853",
"0.57625484",
"0.57412857",
"0.5735919",
"0.56989264",
"0.5624381",
"0.55708325",
"0.5546818",
"0.55423856",
"0.5510702",
"0.54998934",
"0.5499139",
"0.5491048",
"0.54501593",
"0.5438655",
"0.543015",
"0.5428605",
"0.54259557",
"0.54067165",
"0.5400293",
"0.5392364",
"0.5352687",
"0.5339612",
"0.5337397",
"0.53318715"
] | 0.74262494 | 0 |
check whether each alarm matches an alarm_def on every match_key; the match result is collected in self.matched_alarm_list | def match_alarm(self):
for alarm in self.alarm_list:
is_matched = False
self._match_alarm_by_def(alarm)
if alarm["_match_info"].get("alarm_def_id"):
self.matched_alarm_list.append(alarm)
is_matched = True
if is_matched:
logger.debug(
"Matched alarm(source_id:%s)",
alarm["_match_info"].get("source_id"))
else:
logger.debug(
"UNMatched alarm(source_id:%s)",
alarm["_match_info"].get("source_id"))
unmatch_alarm_hook(alarm)
logger.info("matched_alarm_list (%s)", len(self.matched_alarm_list)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _match_alarm_by_def(self, alarm, origin_alarm_def_id=None,\n unmatch_log=None):\n if unmatch_log is None and settings.ENV == \"TEST\":\n unmatch_log = True\n\n matched_alarm_def_id = None\n for alarm_def in self.alarm_def_list:\n for match_key, match_func in constants.ALARM_MATCH_KEY.items():\n # get alarm_def_value from alarm_def_dict\n alarm_def_value = alarm_def[match_key]\n # exclude_biz_ids 需要特色处理\n if match_key == 'exclude_biz_ids':\n alarm_value = str(alarm[\"_match_info\"].get('cc_biz_id'))\n else:\n # get alarm_value from _match_info_dict in alarm_dict\n alarm_value = alarm[\"_match_info\"].get(match_key)\n # get the check function\n operator_func_name = self.MATCH_FUNC[match_func]\n operator_func = getattr(self, operator_func_name)\n # rule1. if not alarm_def_value is matched\n # rule2. if not alarm_value is not matched\n # rule3. exec check function return whether matched\n is_matched = (not alarm_def_value) or (alarm_value and operator_func(alarm_value, alarm_def_value))\n if not is_matched:\n if unmatch_log:\n logger.debug(\"unmatched_key/alarm_def/alarm: %s %s %s\", match_key, alarm_def_value, alarm_value)\n break\n # else means is matched\n else:\n if origin_alarm_def_id:\n # If origin_alarm_def_id is not None, means\n # probably match multi alarm_def, so we should\n # check the matched alarm_def_id whether\n # is origin_alarm_def_id\n if alarm_def[\"id\"] != origin_alarm_def_id:\n continue\n # add alarm_def_id to _match_info dict for converge\n alarm[\"_match_info\"][\"alarm_def_id\"] = alarm_def[\"id\"]\n matched_alarm_def_id = alarm_def[\"id\"]\n break\n return matched_alarm_def_id",
"def matches(self):\n pass",
"def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]",
"def check_answer(self,msg_list,honeypotids,expect_dict):\n filtered_msgs = []\n for msg in msg_list:\n if \"ALL\" in honeypotids or msg[\"from\"] in honeypotids:\n for k in expect_dict.keys():\n if k in msg.keys():\n if msg[k] == expect_dict[k]:\n filtered_msgs.append(msg)\n return filtered_msgs",
"def _match(self, check):\n matches = []\n tests = {}\n for k, v in check.items():\n if isinstance(v, dict):\n tests[k] = CompositeFilter(v)\n else:\n tests[k] = lambda o: _add_tz(o) == _add_tz(v)\n\n for rec in self._records.values():\n if self._match_one(rec, tests):\n matches.append(deepcopy(rec))\n return matches",
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"def match_candidates(self):\n for event in self._events:\n event.match_candidates()",
"def wait_for_all_alarms(alarm_def_id_list, mon_client, number_of_expected_alarms):\n print('Waiting for alarms to be created')\n check_start_time = time.time()\n alarm_count = 0\n while alarm_count < number_of_expected_alarms:\n alarm_count = 0\n for id in alarm_def_id_list:\n num = len(mon_client.alarms.list(alarm_definition_id=id))\n alarm_count += num\n\n if check_start_time + TIMEOUT < time.time():\n print \"TIMEOUT. Found only {} alarms expect {}\".format(alarm_count, number_of_expected_alarms)\n break\n\n return alarm_count",
"def test_get_hyperflex_alarm_list(self):\n pass",
"def matches_dict(self, answer_dict):\n\n return self.matches(Answer(\n answer_dict['group_id'],\n answer_dict['block_id'],\n answer_dict['answer_id'],\n \"\",\n answer_dict['group_instance'],\n answer_dict['answer_instance'],\n ))",
"def match_indicators(self):\n return self._match_indicators",
"def on_matching_rules(self, matching_rules):\n pass",
"def matches(self):\n return False",
"def _matchcondition_holds(self, matchconditions, src_sheet):\n matches=True\n if matchconditions is None:\n return False\n\n for incoming_key, incoming_value in matchconditions.items():\n if (incoming_key in src_sheet.properties and \\\n str(src_sheet.properties[incoming_key]) != str(incoming_value)) \\\n or (incoming_key not in src_sheet.properties and incoming_value is not None):\n matches=False\n break\n\n return matches",
"def modifyAlarms(parsedDict):\n log = list()\n if len(parsedDict) == 0:\n log.append('No changes were made to the database')\n else:\n for alarmName, alarmState in parsedDict.items():\n activationTime = timeOffset(secs=readNotificationGap(alarmName)) # timeNow + notifyGap = activationTime\n alarmId = allAlarms[alarmName]\n if alarmState == 1: # User has turned on the alarm\n if isActive(alarmName, activeDict): # If the alarm is already active\n message = '%s is already active' % alarmName\n log.append(message)\n else: # Alarm Activated/ New Entry\n activateAlarm(alarmId, activationTime)\n message = '%s has been activated' % alarmName\n log.append(message)\n elif alarmState == 0: # User has turned off the alarm\n if isActive(alarmName, activeDict): # Alarm Deactivated/ Entry Closed\n deactivateAlarm(alarmId)\n message = '%s has been deactivated' % alarmName\n log.append(message)\n else: # If the Alarm is inactive\n message = \"%s is already deactivated\" % alarmName\n log.append(message)\n # Don't process newAlarmState is None\n return log",
"def _match_all(self, obj, criteria):\n return all(getattr(obj, key_, None) == value_ for\n key_, value_ in criteria.items())",
"def match_all(self, match_all):\n \n self._match_all = match_all",
"def update_alarms():\n try:\n print(\"update alarms\")\n alarm_groups = {}\n # group everything by region\n for alarm in cloudwatch_data.all_subscribed_alarms():\n region_name = alarm[\"Region\"]\n alarm_name = alarm[\"AlarmName\"]\n if region_name not in alarm_groups:\n alarm_groups[region_name] = []\n alarm_groups[region_name].append(alarm_name)\n print(alarm_groups)\n # update each grouped list for a region\n for region_name in alarm_groups:\n alarm_names = alarm_groups[region_name]\n cloudwatch_data.update_alarms(region_name, alarm_names)\n except ClientError as error:\n print(error)\n return True",
"def match(self) -> bool:",
"def run_all_device_rules(self, device, modify_config=False):\n for _ in self.run_rule(device, modify_config=modify_config):\n continue\n self.logger.debug(\"confirm_all_matches: self.matches: {}\".format(self.matches))",
"def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches",
"def matcher(item):\n hit = item.get(lookup_key)\n if not isinstance(hit, list):\n return hit == identifier\n return any([el for el in hit if el == identifier])",
"def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None",
"def get_alarms() -> List[Dict[str, Any]]:\n return __alarm_info.values()",
"def get_matches(self, update_flow=True):\n if update_flow:\n self.update_flow()\n flow = (self.capacity-self.residual).toarray()\n\n matches = self.fixed_matches.copy()\n for p_idx in range(self.pod_num):\n for m_idx in range(self.mentor_num):\n for s_idx, u, v in self.shared_slots[p_idx, m_idx]:\n if flow[u, v]>0:\n if self.stage==1:\n matches.append(\n (s_idx, self.pod_info['name'][p_idx], self.mentor_info['email'][m_idx])\n )\n if self.stage==2:\n matches.append(\n (s_idx%SLOT_NUM, self.pod_info['name'][p_idx], self.mentor_info['email'][m_idx])\n )\n return matches",
"def handleMatch(self, m):\r\n pass",
"def match(self, result: dict):\n if self._matchStatus(result['Status']):\n if self._comparator['Length']:\n return self._matchLength(int(result['Length']))\n if self._comparator['Time']:\n return self._matchTime(result['Time Taken'])\n return True\n return False",
"def matches(self, accession):\n pass",
"def test_matching_list(self):\n\n regex = \"fun\"\n expected = [\"funilrys\", \"funceble\", \"pyfunceble\"]\n actual = Regex(self.data_list, regex).matching_list()\n\n self.assertEqual(expected, actual)",
"def get_matches(self,redcap,boolean,val):\n\t\tmatches = []\n\t\ttry:\n\t\t\tfor eachdict in self.get_data(redcap):\n\t\t\t\tif (boolean):\n\t\t\t\t\tif (eachdict[redcap].strip() == str(val)):\n\t\t\t\t\t\tmatches.append((eachdict['article_doi'],eachdict['record_id'],eachdict[redcap]))\n\t\t\t\telse:\n\t\t\t\t\tif (eachdict[redcap].strip() != str(val)):\n\t\t\t\t\t\tmatches.append((eachdict['article_doi'],eachdict['record_id'],eachdict[redcap]))\n\t\texcept KeyError as e:\n\t\t\tprint(\"redcap field: '{}'\\nnot found. did you mean: '{}'?\\nverify and try again\".format(redcap,get_close_matches(redcap,[d['field_name'] for d in self.get_metadata()])))\n\t\treturn matches"
] | [
"0.77223146",
"0.61824495",
"0.61445177",
"0.59490174",
"0.58866197",
"0.5850368",
"0.5601849",
"0.5581404",
"0.5487921",
"0.5483094",
"0.53939867",
"0.5369485",
"0.5360608",
"0.53533703",
"0.5322929",
"0.5289111",
"0.52547723",
"0.52222395",
"0.52204394",
"0.5210209",
"0.5196609",
"0.5193652",
"0.5189986",
"0.5171469",
"0.5155412",
"0.5150193",
"0.5138053",
"0.5131385",
"0.512886",
"0.51278394"
] | 0.82694453 | 0 |
match an alarm against the alarm_def list; alarm["_match_info"]["alarm_def_id"] will be set to the matched alarm_def's id | def _match_alarm_by_def(self, alarm, origin_alarm_def_id=None,
unmatch_log=None):
if unmatch_log is None and settings.ENV == "TEST":
unmatch_log = True
matched_alarm_def_id = None
for alarm_def in self.alarm_def_list:
for match_key, match_func in constants.ALARM_MATCH_KEY.items():
# get alarm_def_value from alarm_def_dict
alarm_def_value = alarm_def[match_key]
# exclude_biz_ids needs special handling
if match_key == 'exclude_biz_ids':
alarm_value = str(alarm["_match_info"].get('cc_biz_id'))
else:
# get alarm_value from _match_info_dict in alarm_dict
alarm_value = alarm["_match_info"].get(match_key)
# get the check function
operator_func_name = self.MATCH_FUNC[match_func]
operator_func = getattr(self, operator_func_name)
# rule1. an empty alarm_def_value matches anything
# rule2. a missing alarm_value never matches
# rule3. otherwise the check function decides whether it matches
is_matched = (not alarm_def_value) or (alarm_value and operator_func(alarm_value, alarm_def_value))
if not is_matched:
if unmatch_log:
logger.debug("unmatched_key/alarm_def/alarm: %s %s %s", match_key, alarm_def_value, alarm_value)
break
# reaching the for-else branch means every key matched
else:
if origin_alarm_def_id:
# If origin_alarm_def_id is not None, the alarm may match
# multiple alarm_defs, so only accept the alarm_def whose
# id equals origin_alarm_def_id
if alarm_def["id"] != origin_alarm_def_id:
continue
# add alarm_def_id to _match_info dict for converge
alarm["_match_info"]["alarm_def_id"] = alarm_def["id"]
matched_alarm_def_id = alarm_def["id"]
break
return matched_alarm_def_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match_alarm(self):\n for alarm in self.alarm_list:\n is_matched = False\n self._match_alarm_by_def(alarm)\n if alarm[\"_match_info\"].get(\"alarm_def_id\"):\n self.matched_alarm_list.append(alarm)\n is_matched = True\n\n if is_matched:\n logger.debug(\n \"Matched alarm(source_id:%s)\",\n alarm[\"_match_info\"].get(\"source_id\"))\n else:\n logger.debug(\n \"UNMatched alarm(source_id:%s)\",\n alarm[\"_match_info\"].get(\"source_id\"))\n unmatch_alarm_hook(alarm)\n\n logger.info(\"matched_alarm_list (%s)\", len(self.matched_alarm_list))",
"def get_alarm_definationID(alarm_uuid):\n alarm_definationID = None\n if alarm_uuid :\n try:\n access_config = get_alarm_config()\n headers = {'Accept': 'application/json'}\n api_url = '{}/suite-api/api/alerts/{}'.format(access_config.get('vrops_site'), alarm_uuid)\n api_response = requests.get(\n api_url,\n auth=(access_config.get('vrops_user'), access_config.get('vrops_password')),\n verify = False, headers = headers\n )\n\n if api_response.status_code == 200:\n data = api_response.json()\n if data.get(\"alertDefinitionId\") is not None:\n alarm_definationID = '-'.join(data.get(\"alertDefinitionId\").split('-')[1:])\n else:\n logger.error(\"Failed to get alert definition ID for alarm {}\".format(alarm_uuid))\n except Exception as exp:\n logger.error( \"Exception occured while getting alert definition ID for alarm : {}\".format(exp, alarm_uuid))\n\n return alarm_definationID",
"def pull_alarm(self):\n self.job = MATCH_QUEUE.take(timeout=settings.QUEUE_WAIT_TIMEOUT)\n\n if not self.job:\n raise lock.PassEmpty\n\n # JSON数据格式,反序列化\n try:\n self.alarm_list = map(json.loads, self.job.body.strip().splitlines())\n except Exception as error:\n logger.warning(\n 'match alarm pull error:%s, %s, please check job is json serialized',\n error,\n self.job.body)",
"def getMatchId(self):\n return None",
"def registerAlarm(self, cmd, module, ref):\n id = self.__register(cmd, module, ref, ALARM)\n logging.debug(\"--Registered an Alarm(%s, %s)\" % (id, cmd))\n return id",
"def handleMatch(self, m):\r\n pass",
"def addAlarm(self,payload):\n\n url = \"/alarm/alarms\"\n\n headers = {\n 'Content-Type':'application/vnd.com.nsn.cumulocity.alarm+json'\n }\n\n response = self.postRequest(url,payload)\n \n if response.status_code == 201:\n self.logger.info(\"device alarm added\")\n else:\n raise Exception(\"could not add device alarm\")",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n enabled: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[int]] = None,\n message_type: Optional[pulumi.Input[str]] = None,\n queue_regex: Optional[pulumi.Input[str]] = None,\n recipients: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n reminder_interval: Optional[pulumi.Input[int]] = None,\n time_threshold: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_calculation: Optional[pulumi.Input[str]] = None,\n value_threshold: Optional[pulumi.Input[int]] = None,\n vhost_regex: Optional[pulumi.Input[str]] = None) -> 'Alarm':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AlarmState.__new__(_AlarmState)\n\n __props__.__dict__[\"enabled\"] = enabled\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"message_type\"] = message_type\n __props__.__dict__[\"queue_regex\"] = queue_regex\n __props__.__dict__[\"recipients\"] = recipients\n __props__.__dict__[\"reminder_interval\"] = reminder_interval\n __props__.__dict__[\"time_threshold\"] = time_threshold\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"value_calculation\"] = value_calculation\n __props__.__dict__[\"value_threshold\"] = value_threshold\n __props__.__dict__[\"vhost_regex\"] = vhost_regex\n return Alarm(resource_name, opts=opts, __props__=__props__)",
"def parse_duress_alarm(self):\n alarm_type = -1\n sn = -1\n match_type = -1\n if self.last_event_code == DEFS.EF_ALARM:\n alarm_type = struct.unpack('<H', self.last_payload_data[4:6])[0]\n sn = struct.unpack('<H', self.last_payload_data[6:8])[0]\n match_type = struct.unpack('<I', self.last_payload_data[8:12])[0]\n return [alarm_type, sn, match_type]",
"def match_Id(self, object, options) :\n\t\treturn self.match_anystring(\"id\", object.getId(), options)",
"def match_id(self):\n return self._id",
"def match_node_id(self, id_, match):\n pass",
"def test_get_hyperflex_alarm_list(self):\n pass",
"def alarm(self, alarm):\n\n self._alarm = alarm",
"def get_alarm(self, name):\n\n alarm = self._alarm_manager.get_alarm(name)\n\n return alarm",
"def match_id(self, match_id):\n\n self._match_id = match_id",
"def set_alarms(alarm:dict, s):\r\n time = alarm['title'][:10:] + \" \" + alarm['title'][11::]\r\n alarm_time = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M\")\r\n delay = (alarm_time - datetime.datetime.strptime(str(datetime.datetime.now()).rpartition(':')[0], \"%Y-%m-%d %H:%M\")).total_seconds()\r\n if alarm['news'] and alarm['weather']:\r\n message = alarm['content'] + \" - Top news stories - One - \" + (get_news()[-1])['name'] + \" - two - \" + (get_news()[-2])['name'] + \" - three - \" + (get_news()[-3])['name'] + \" - \" + get_weather() + \" - Covid-19 update - \" + get_covid()\r\n elif alarm['news']:\r\n message = alarm['content'] + \" - Top news stories - One - \" + (get_news()[-1])['name'] + \" - two - \" + (get_news()[-2])['name'] + \" - three - \" + (get_news()[-3])['name'] + \" - Covid-19 update - \" + get_covid()\r\n elif alarm['weather']:\r\n message = alarm['content'] + \" - \" + get_weather() + \" - Covid-19 update - \" + get_covid()\r\n else:\r\n message = alarm['content'] + \" - Covid-19 update - \" + get_covid()\r\n s.enter(int(delay),1,set_off_alarm,(message,))\r\n logging.info(\"Alarm set in set_alarms(): \" + message)",
"def alarm_message_received(msg):\n data = IndexIntData.from_msg(msg)\n if data is None:\n _LOGGER.error(\"Undable to parse MQTT message\")\n\n if data.value == 1:\n self._alarm.add(data.index)\n else:\n self._alarm.discard(data.index)\n\n _LOGGER.debug(\"Alarm: %s\", self._alarm)\n self.async_write_ha_state()",
"def create_match_api_rule(self, prio_id, handle_id, table_id,\n match_field_value_mask_list, action, action_value=None):\n pass",
"def clear_alarm_definition(monasca_client):\n pattern = re.compile(ALARM_DEFINITION_NAME + '[0-9]+')\n\n for alarm_definition in monasca_client.alarm_definitions.list():\n try:\n if pattern.match(alarm_definition['name']):\n monasca_client.alarm_definitions.delete(alarm_id=alarm_definition['id'])\n except Exception as e:\n print('Failed to delete alarm definition ERROR:')\n print e",
"def match(self, item):",
"def refine_alarms_message(self, decoded_message):\n\n # Initialize return values\n currentTime = None\n\n # Go through all of the Single Context Polls\n for singleContextPolls in decoded_message['PollMdibDataReplyExt']['PollInfoList']:\n\n # Make sure that they are dicts (ie not length, count), and they aren't empty\n if type(decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]) == dict and decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info']['length'] > 0:\n\n # Go through all of the Observation Polls (each data modality stored in separate observation poll)\n for observationPolls in decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info']:\n\n # Make sure that they are dicts (ie not length, count)\n if type(decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]) == dict:\n\n # Initialize timestamp\n currentTime = self.initialTimeDateTime + datetime.timedelta(seconds = int((decoded_message['PollMdibDataReplyExt']['RelativeTime'] - self.relativeInitialTime)/8000))\n currentTime = str(currentTime.time())\n\n # Initialize currentTime\n if currentTime not in self.VitalsNumericsAlarmsData:\n self.VitalsNumericsAlarmsData[currentTime] = {}\n\n # If the message active patient alarm data, store it\n if 'NOM_ATTR_AL_MON_P_AL_LIST' in decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]['ObservationPoll']['AttributeList']['AVAType']:\n\n i = 0\n\n # Iterate through all the different data types (ie scada) in the compound value\n for devAlarm in decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList']:\n\n # Make sure that they are dicts (ie not length, count)\n if type(decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]) == dict:\n\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)] = {}\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)] ['timestamp'] = currentTime\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)]['code'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['al_code']#.split('NOM_EVT_')[1]\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)]['source'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['al_source']#.split('NOM_')[1]\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)]['alarmType'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n 
['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['AlertType']#.split('_')[0] + '_P'\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)]['state'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['AlertState']#.split('AL_')[1]\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_P_' + str(i)]['string'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_P_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['StrAlMonInfo']['String']['value']\n i += 1\n\n # If the patient contains active technical alarms, store it\n if 'NOM_ATTR_AL_MON_T_AL_LIST' in decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]['ObservationPoll']['AttributeList']['AVAType']:\n\n i = 0\n\n # Iterate through all the different data types (ie scada) in the compound value\n for devAlarm in decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList']:\n\n # Make sure that they are dicts (ie not length, count)\n if type(decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]) == dict:\n\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)] = {}\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)] ['timestamp'] = currentTime\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)]['code'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['al_code']#.split('NOM_EVT_')[1]\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)]['source'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['al_source']#.split('NOM_')[1]\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)]['alarmType'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['AlertType']#.split('_')[0] + '_T'\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)]['state'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n 
['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['AlertState']#.split('AL_')[1]\n self.VitalsNumericsAlarmsData[currentTime]['Alarm_T_' + str(i)]['string'] = decoded_message['PollMdibDataReplyExt']['PollInfoList'][singleContextPolls]['SingleContextPoll']['poll_info'][observationPolls]\\\n ['ObservationPoll']['AttributeList']['AVAType']['NOM_ATTR_AL_MON_T_AL_LIST']['AttributeValue']['DevAlarmList'][devAlarm]['DevAlarmEntry']['StrAlMonInfo']['String']['value']\n i += 1\n\n # # Append to file _if_ you are keeping a text file\n # with open(self.VitalsNumericsAlarmsDataFile, 'a+') as outfile:\n # json.dump(self.VitalsNumericsAlarmsData, outfile)\n\n ret = {'timestamp': self.timestamp(decoded_message),\n 'alarms': {} }\n\n if currentTime:\n for key, value in self.VitalsNumericsAlarmsData[currentTime].iteritems():\n if key.startswith('Alarm'):\n ret['alarms'][key] = {'source': value['source'],\n 'code': value['code'],\n 'state': value['state'],\n 'string': value['string'],\n 'type': value['alarmType']}\n\n # This possibly fixes growing forever problem\n del self.VitalsNumericsAlarmsData[currentTime]\n\n if not ret['alarms']:\n return None\n\n return ret",
"def _isalarm(self):\n return self.dp.state()==PyTango.DevState.ALARM",
"async def get_match_from_id(match_id: int) -> Match or None:\n if match_id is None:\n return None\n\n if match_id in match_library:\n return match_library[match_id]\n\n raw_data = await matchdb.get_raw_match_data(match_id)\n if raw_data is not None:\n return await make_match_from_raw_db_data(raw_data)\n else:\n return None",
"def set_new_alarm():\r\n time = request.args.get('alarm')\r\n name = request.args.get('two')\r\n news = request.args.get('news')\r\n weather = request.args.get('weather')\r\n date = time[:10:] + \" \" + time[11::]\r\n if news is None:\r\n news = False\r\n else:\r\n news = True\r\n if weather is None:\r\n weather = False\r\n else:\r\n weather = True\r\n alarms.insert(0, {\"title\":date, \"content\":name, \"news\":news, \"weather\":weather, \"id\":1})\r\n set_alarms(alarms[0], s)\r\n logging.info(\"Alarm created in set_new_alarm()\")",
"def get_play_actions(db, match_id, verbose=False):\n try:\n \n events = list(filter(lambda x: x['matchId'] == match_id and x['matchPeriod'] in ['1H', '2H'],db))\n half_offset = {'2H' : max([x['eventSec'] for x in events if x['matchPeriod']=='1H']),\n '1H':0}\n events = sorted(events, key = lambda x: x['eventSec'] + half_offset[x['matchPeriod']])\n first_secondhalf_evt = sorted(filter(lambda x: x['matchPeriod'] == '2H',events), key = lambda x: x['eventSec'])[0]\n ## add a fake event representing the start and end of the game and the second half start\n events.insert(0, START_OF_GAME_EVENT)\n events.insert(events.index(first_secondhalf_evt),START_2ND_HALF)\n events.append(END_OF_GAME_EVENT)\n\n play_actions = []\n\n time, index, current_action, current_half = 0.0, 1, [], '1H'\n previous_event = events[0]\n \n while index < len(events) - 2:\n\n current_event = events[index]\n # if the action stops by an game interruption\n if is_interruption(current_event, current_half):\n if current_event['eventId'] not in [-1,-2]: #delete fake events\n current_action.append(current_event)\n play_actions.append(('interruption', current_action))\n current_action = []\n\n elif is_penalty(current_event):\n next_event = events[index + 1]\n\n if is_save_attempt(next_event) or is_reflexes(next_event):\n index += 1\n current_action.append(current_event)\n \n play_actions.append(('penalty', current_action))\n current_action = []\n else:\n current_action.append(current_event)\n\n elif is_shot(current_event):\n next_event = events[index + 1]\n\n if is_interruption(next_event, current_half):\n index += 1\n current_action.append(current_event)\n if current_event['eventId'] not in [-1,-2]: #delete fake events\n current_action.append(next_event)\n play_actions.append(('shot', current_action))\n current_action = []\n\n ## IF THERE IS A SAVE ATTEMPT OR REFLEXES; GO TOGETHER\n elif is_save_attempt(next_event) or is_reflexes(next_event):\n index += 1\n current_action.append(current_event)\n current_action.append(next_event)\n play_actions.append(('shot', current_action))\n current_action = []\n\n else:\n current_action.append(current_event)\n play_actions.append(('shot', current_action))\n current_action = []\n\n elif is_ball_lost(current_event, previous_event):\n\n current_action.append(current_event)\n play_actions.append(('ball lost', current_action))\n current_action = [current_event]\n\n else:\n current_action.append(current_event)\n\n time = current_event['eventSec']\n current_half = current_event['matchPeriod']\n index += 1\n\n if not is_duel(current_event):\n previous_event = current_event\n\n events.remove(START_OF_GAME_EVENT)\n events.remove(END_OF_GAME_EVENT)\n events.remove(START_2ND_HALF)\n\n return play_actions\n except TypeError:\n \n return []",
"def get_alarm_by_tag(self, tag):\n\n alarm = self._alarm_manager.get_alarm_by_tag(tag)\n\n return alarm",
"def addMatch(self, id, match):\n self._matches[id] = match",
"def get(self, match=None, uid=None, **kwargs):\n self.match = match\n self.endpoint = self.EndpointURI()\n return super(MatchResource, self).get(uid, **kwargs)",
"def test_eval_alarm(self):\n def get_state_update_value(h):\n \"\"\"\n \n \"\"\"\n oldstate = h.data['oldState']['stateValue']\n newstate = h.data['newState']['stateValue']\n querydate = h.data['newState']['stateReasonData']['queryDate']\n querydate = utils.parse_strtime(querydate)\n return oldstate, newstate, querydate \n \n test_uuid = str(uuid.uuid4())\n alarmname = \"TestEvalAlarm_\" + test_uuid\n metricname = \"TestEvalMetric_\" + test_uuid\n namespace = \"unittest\"\n unit = \"Percent\"\n dimensions = {\"test_id\":test_uuid}\n threshold = 2.0\n \n # create metric alarm\n alarm = MetricAlarm(name=alarmname, metric=metricname,\n namespace=namespace, statistic=\"Average\",\n comparison=\">\", threshold=threshold,\n period=60, evaluation_periods=1, unit=unit,\n dimensions=dimensions)\n self.synaps.put_metric_alarm(alarm)\n \n # due to put_metric_alarm is asynchronous\n time.sleep(ASYNC_WAIT)\n \n alarm_time = datetime.datetime.utcnow().replace(second=0,\n microsecond=0)\n self.synaps.put_metric_data(namespace=namespace, name=metricname,\n value=threshold + 1, timestamp=alarm_time,\n unit=unit, dimensions=dimensions)\n\n time.sleep(60 * 5)\n\n ok_time = datetime.datetime.utcnow().replace(second=0,\n microsecond=0) \n self.synaps.put_metric_data(namespace=namespace, name=metricname,\n value=threshold - 2, timestamp=ok_time,\n unit=unit, dimensions=dimensions)\n\n time.sleep(60 * 5)\n \n histories = self.synaps.describe_alarm_history(alarm_name=alarmname,\n history_item_type=\"StateUpdate\")\n histories.sort(cmp=lambda a, b: cmp(a.timestamp, b.timestamp))\n\n result = map(get_state_update_value, histories)\n \n expected = (('INSUFFICIENT_DATA', 'ALARM', alarm_time),\n ('ALARM', 'INSUFFICIENT_DATA', None),\n ('INSUFFICIENT_DATA', 'OK', ok_time),\n ('OK', 'INSUFFICIENT_DATA', None))\n \n failmsg = \"expected: %s real: %s\" % (expected, result)\n \n self.assertEqual(len(result), len(expected), msg=failmsg)\n \n for ((r_new, r_old, r_time), (e_new, e_old, e_time)) in zip(result,\n expected):\n self.assertEqual(r_new, e_new, msg=failmsg)\n self.assertEqual(r_old, e_old, msg=failmsg)\n if e_time:\n self.assertTrue((r_time - e_time) < timedelta(seconds=300),\n msg=failmsg)\n \n self.synaps.delete_alarms(alarms=[alarmname])"
] | [
"0.7166458",
"0.57819176",
"0.5482392",
"0.531153",
"0.5286329",
"0.5285556",
"0.52826345",
"0.5204021",
"0.51818633",
"0.5035592",
"0.5033543",
"0.49758896",
"0.49523956",
"0.4948084",
"0.49357826",
"0.49351478",
"0.49312088",
"0.4913772",
"0.48917705",
"0.48898852",
"0.48856574",
"0.4846137",
"0.48378626",
"0.48322186",
"0.4823487",
"0.48197",
"0.47633675",
"0.47585475",
"0.47502753",
"0.47171354"
] | 0.798852 | 0 |
Returns a list of durations | def get_dur(self):
return [char.get_dur() for char in self.string] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDurations(self):\n return self.durations",
"def durations_per_type(self):\n pass",
"def getDuration(self):\n return (self._get_int('duration'), self._attributes.getDivisions())",
"def get_duration(self):\n duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration",
"def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration",
"def _get_dur(inst):\n for fil, sig in inst['localization'].items():\n ke = sorted([int(i) for i in sig.keys()], key=int)\n if (len(ke) != 2):\n log(0, \"Error: Instance has two ranges\\n%s\" % (str(inst)))\n exit(1)\n dur = ke[1] - ke[0]\n assert dur > 0, \"Duration <= 0\"\n return(dur)",
"def timings(self):\r\n return self._timings",
"def get_times(self):\n times = []\n for i in range(1, len(self.events)):\n times.append(self.events[i-1].elapsed_time(self.events[i]))\n return times",
"def duration(self):\n pass",
"def duration(self):\n pass",
"def duration_steps_readable(durations):\n duration_strings = list()\n for i, minutes in enumerate(durations):\n duration_strings.append(minutes_readable(minutes))\n return duration_strings",
"def get_duration(self):\n dur = 0\n for clu in self._clusters:\n dur += self._clusters[clu].get_duration()\n return dur",
"def crop_duration(self):\n\n s_date = self.soil_inputs.sowing_date.values[0]\n h_date = self.soil_inputs.harvest_date.values[0]\n\n return [s_date, h_date]",
"def duration_in_seconds(self):\n return self.get_data(\"duration_in_seconds\")",
"def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]",
"def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")",
"def _get_duration(self):\n durations = [series_episode.duration for series_episode in SeriesEpisode.objects.filter(series=self)]\n return reduce(lambda x, y: x + y, durations) if len(durations) > 0 else 0",
"def get_duration(self):\n seconds = self.duration.total_seconds()\n mins, secs = divmod(seconds, 60)\n return int(mins), int(secs)",
"def get_frame_durations(file):\n pos = file.tell()\n\n frame_durations = []\n last_frame_timestamp = None\n def collect_timestamps(frame, timestamp):\n timestamp = round(timestamp*1000)\n\n nonlocal last_frame_timestamp\n if last_frame_timestamp is not None:\n duration = timestamp - last_frame_timestamp\n frame_durations.append(duration)\n last_frame_timestamp = timestamp\n\n result = ExportMJPEG(frame_callback=collect_timestamps)\n mkvparse.mkvparse(file, result)\n\n # We don't have durations from the frame or a file duration. ugoira_downloader_mjpeg\n # duplicates the last frame with a zero duration to give the last frame its\n # duration so seamless looping works. Just match that here so everything round-trips\n # cleanly.\n frame_durations.append(0)\n\n # Return to the original file position.\n file.seek(pos)\n\n return frame_durations",
"def duration(self):\n return int(sum(t.duration for t in self.tasks) / 3600)",
"def duration(self):\n return self._end - self._begin",
"def getTimes():",
"def getTimes():",
"def getTimes():",
"def duration(self) -> float:\n return self.delta_t * len(self)",
"def duration(self) -> float:\n return self.delta_t * len(self)",
"def _generate_case_durations(self):\n return pd.Series(self.df_cases[\"Median Duration\"].values, index=self.df_cases[\"CaseID\"]).to_dict()",
"def duration(self):\r\n return self.stop - self.start",
"def time_list(self):\n return (self.N_T * (np.arange(self.N_itr) + 1) /\n self.N_itr * 1000 * self.DT)",
"def durations(on_series, on_or_off, ignore_n_off_samples=None,\n sample_period=None):\n # TODO: ignore_n_off_samples should be generalised so it does the\n # right thing when `on_or_off='off'`\n if sample_period is None:\n sample_period = get_sample_period(on_series)\n events = on_off_events(on_series, ignore_n_off_samples)\n delta_time_array = np.diff(events.index.values).astype(int) / 1E9\n delta_time = pd.Series(delta_time_array, index=events.index[:-1])\n diff_for_mode = 1 if on_or_off == 'on' else -1\n events_for_mode = events == diff_for_mode\n durations = delta_time[events_for_mode]\n if ignore_n_off_samples is not None:\n durations = durations[durations > sample_period * ignore_n_off_samples]\n\n durations.name = 'seconds ' + on_or_off + ' for ' + str(on_series.name)\n return durations"
] | [
"0.81575495",
"0.726394",
"0.6876918",
"0.676946",
"0.6652773",
"0.65874827",
"0.65300137",
"0.64949",
"0.6487036",
"0.6487036",
"0.64002234",
"0.6309793",
"0.6307618",
"0.630678",
"0.62959886",
"0.62911266",
"0.6283895",
"0.62529457",
"0.6236019",
"0.6182811",
"0.6182114",
"0.61713517",
"0.61713517",
"0.61713517",
"0.61663085",
"0.61663085",
"0.61630917",
"0.61382425",
"0.60993737",
"0.6068279"
] | 0.74434316 | 1 |
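A minimal, hypothetical sketch (not part of the dataset rows above or below) of the delegation pattern the preceding record's document illustrates: a container whose `get_dur` collects per-element durations. The `Note`/`Phrase` class names and the duration values are assumptions made only for this illustration.

```python
class Note:
    """Hypothetical element type; each element knows its own duration."""
    def __init__(self, dur):
        self._dur = dur

    def get_dur(self):
        return self._dur


class Phrase:
    """Hypothetical container mirroring the `self.string` attribute above."""
    def __init__(self, chars):
        self.string = chars

    def get_dur(self):
        # Same pattern as the record's document: delegate to each element.
        return [char.get_dur() for char in self.string]


print(Phrase([Note(1.5), Note(0.5)]).get_dur())  # -> [1.5, 0.5]
```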
My new constructor, which makes sure that the ``FRAME_TOOL_WINDOW`` style is not passed through to the ``AuiFloatingFrame`` constructor | def __init__(self, *args, **kwargs):
if 'style' in kwargs:
style = kwargs['style']
# This is the default style, as defined
# in the AuiFloatingFrame constructor
else:
style = (wx.FRAME_TOOL_WINDOW |
wx.FRAME_FLOAT_ON_PARENT |
wx.FRAME_NO_TASKBAR |
wx.CLIP_CHILDREN)
if fwidgets.inSSHSession():
style &= ~wx.FRAME_TOOL_WINDOW
kwargs['style'] = style
super().__init__(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _AuiDockingGuide_init(self, *args, **kwargs):\n\n if 'style' in kwargs:\n style = kwargs['style']\n\n # This is the default style, as defined\n # in the AuiDockingGuide constructor\n else:\n style = (wx.FRAME_TOOL_WINDOW |\n wx.FRAME_STAY_ON_TOP |\n wx.FRAME_NO_TASKBAR |\n wx.NO_BORDER)\n\n if fwidgets.inSSHSession():\n style &= ~wx.FRAME_TOOL_WINDOW\n\n kwargs['style'] = style\n\n _AuiDockingGuide_real_init(self, *args, **kwargs)",
"def CreateFloatingFrame(self, parent, pane_info):\r\n\r\n return AuiFloatingFrame(parent, self, pane_info)",
"def __init__(self, parent):\r\n\r\n AuiDockingGuide.__init__(self, parent, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,\r\n name=\"auiCenterDockTarget\")\r\n\r\n self.Hide()\r\n\r\n self.CreateShapesWithStyle()\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n if wx.Platform == \"__WXGTK__\":\r\n self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)\r\n else:\r\n self.SetGuideShape()\r\n \r\n self.SetSize(self.region.GetBox().GetSize())\r\n\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)",
"def __init__(self, parent, id=wx.ID_ANY, title=\"\", pos=wx.DefaultPosition,\r\n size=wx.Size(1, 1), style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,\r\n name=\"auiHintWindow\"):\r\n if wx.Platform == '__WXMAC__' and style & wx.FRAME_SHAPED:\r\n # Having the shaped frame causes the frame to not be visible\r\n # with the transparent style hints.\r\n style -= wx.FRAME_SHAPED\r\n\r\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)\r\n \r\n self._blindMode = False\r\n self.SetBackgroundColour(colourHintBackground)\r\n \r\n # Can't set background colour on a frame on wxMac\r\n # so add a panel to set the colour on.\r\n if wx.Platform == '__WXMAC__':\r\n sizer = wx.BoxSizer(wx.HORIZONTAL)\r\n self.panel = wx.Panel(self)\r\n sizer.Add(self.panel, 1, wx.EXPAND)\r\n self.SetSizer(sizer)\r\n self.panel.SetBackgroundColour(colourHintBackground)\r\n\r\n self.Bind(wx.EVT_SIZE, self.OnSize)",
"def __init__(self, parent, owner_mgr, pane=None, id=wx.ID_ANY, title=\"\",\r\n style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |\r\n wx.FRAME_NO_TASKBAR | wx.CLIP_CHILDREN):\r\n \r\n if pane and pane.IsResizeable():\r\n style += wx.RESIZE_BORDER\r\n if pane:\r\n self._is_toolbar = pane.IsToolbar()\r\n\r\n self._useNativeMiniframes = False\r\n if AuiManager_UseNativeMiniframes(owner_mgr):\r\n # On wxMac we always use native miniframes\r\n self._useNativeMiniframes = True\r\n style += wx.CAPTION + wx.SYSTEM_MENU\r\n if pane.HasCloseButton():\r\n style += wx.CLOSE_BOX\r\n if pane.HasMaximizeButton():\r\n style += wx.MAXIMIZE_BOX\r\n if pane.HasMinimizeButton():\r\n style += wx.MINIMIZE_BOX\r\n \r\n wx.MiniFrame.__init__(self, parent, id, title, pos=pane.floating_pos,\r\n size=pane.floating_size, style=style, name=\"auiFloatingFrame\")\r\n\r\n self._fly_timer = wx.Timer(self, wx.ID_ANY)\r\n self._check_fly_timer = wx.Timer(self, wx.ID_ANY)\r\n \r\n self.Bind(wx.EVT_CLOSE, self.OnClose)\r\n self.Bind(wx.EVT_SIZE, self.OnSize)\r\n self.Bind(wx.EVT_ACTIVATE, self.OnActivate)\r\n self.Bind(wx.EVT_TIMER, self.OnCheckFlyTimer, self._check_fly_timer)\r\n self.Bind(wx.EVT_TIMER, self.OnFlyTimer, self._fly_timer)\r\n self.Bind(EVT_AUI_FIND_MANAGER, self.OnFindManager)\r\n\r\n if self._useNativeMiniframes:\r\n self.Bind(wx.EVT_MOVE, self.OnMoveEvent)\r\n self.Bind(wx.EVT_MOVING, self.OnMoveEvent)\r\n self.Bind(wx.EVT_IDLE, self.OnIdle)\r\n self._useNativeMiniframes = True\r\n self.SetExtraStyle(wx.WS_EX_PROCESS_IDLE)\r\n else:\r\n self.Bind(wx.EVT_MOVE, self.OnMove)\r\n\r\n self._fly = False\r\n self._send_size = True\r\n self._alpha_amount = 255\r\n \r\n self._owner_mgr = owner_mgr\r\n self._moving = False\r\n self._lastDirection = None\r\n self._transparent = 255\r\n\r\n self._last_rect = wx.Rect()\r\n self._last2_rect = wx.Rect()\r\n self._last3_rect = wx.Rect()\r\n\r\n self._mgr = AuiManager()\r\n self._mgr.SetManagedWindow(self)\r\n self._mgr.SetArtProvider(owner_mgr.GetArtProvider())\r\n self._mgr.SetAGWFlags(owner_mgr.GetAGWFlags())",
"def __init__(self, parent, id=wx.ID_ANY, title=\"\", pos=wx.DefaultPosition,\r\n size=wx.DefaultSize, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER, name=\"AuiDockingGuide\"):\r\n\r\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)",
"def __init__(self, parent, direction=0):\r\n\r\n self._direction = direction\r\n\r\n style = wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP | \\\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER\r\n\r\n # Use of FRAME_SHAPED on wxMac causes the frame to be visible\r\n # breaking the docking hints.\r\n if wx.Platform != '__WXMAC__':\r\n style |= wx.FRAME_SHAPED\r\n\r\n AuiDockingGuide.__init__(self, parent, style=style, name=\"auiSingleDockTarget\")\r\n \r\n self.Hide()\r\n\r\n useAero = GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_AERO_DOCKING_GUIDES\r\n useWhidbey = GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES\r\n \r\n self._useAero = useAero or useWhidbey\r\n self._valid = True\r\n \r\n if useAero:\r\n sizeX, sizeY = aeroguideSizeX, aeroguideSizeY\r\n elif useWhidbey:\r\n sizeX, sizeY = whidbeySizeX, whidbeySizeY\r\n else:\r\n sizeX, sizeY = guideSizeX, guideSizeY\r\n\r\n if direction not in [wx.TOP, wx.BOTTOM]:\r\n sizeX, sizeY = sizeY, sizeX\r\n\r\n if self._useAero:\r\n self.CreateShapesWithStyle(useWhidbey)\r\n \r\n if wx.Platform == \"__WXGTK__\":\r\n self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)\r\n else:\r\n self.SetGuideShape()\r\n \r\n self.SetSize(self.region.GetBox().GetSize())\r\n else:\r\n self.SetSize((sizeX, sizeY))\r\n \r\n self.rect = wx.Rect(0, 0, sizeX, sizeY)\r\n\r\n if self._useAero:\r\n useAero = (useWhidbey and [2] or [1])[0]\r\n else:\r\n useAero = 0\r\n \r\n self.target = AuiDockingGuideWindow(self, self.rect, direction, False, useAero)",
"def __init__(self, parent, rect, direction=0, center=False, useAero=False):\r\n\r\n wx.Window.__init__(self, parent, -1, rect.GetPosition(), rect.GetSize(), wx.NO_BORDER)\r\n\r\n self._direction = direction\r\n self._center = center\r\n self._valid = True\r\n self._useAero = useAero\r\n \r\n self._bmp_unfocus, self._bmp_focus = GetDockingImage(direction, useAero, center)\r\n \r\n self._currentImage = self._bmp_unfocus\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)",
"def __init__(self, master, _type=REGULAR, **kw):\r\n Frame.__init__(self, master, **kw)\r\n self.main_frame = Frame(self, bd=1)\r\n self.main_frame.pack()",
"def __init__(self):\r\n\r\n object.__init__(self)\r\n \r\n self.dock_direction = 0\r\n self.dock_layer = 0\r\n self.dock_row = 0\r\n self.size = 0\r\n self.min_size = 0\r\n self.resizable = True\r\n self.fixed = False\r\n self.toolbar = False\r\n self.rect = wx.Rect()\r\n self.panes = []",
"def getFrame(self, relief):\n frame = ttk.Frame(self.master)\n frame['padding'] = (5, 5)\n frame['borderwidth'] = 2\n frame['relief'] = relief\n return frame",
"def __init__(self, target=None):\n\t\ttkinter.Frame.__init__(self, target)\n\t\tself.create_widgets()",
"def __init__(self, scene, parent=None, flags=Qt.WindowFlags()):\n super(CustomQFrame, self).__init__(parent=parent, flags=flags)\n self.scene = scene\n self.parent = parent",
"def __init__(self, *args, **kwargs):\r\n\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n\r\n self.title(TITLE)\r\n self.geometry(f\"{WIDTH}x{HEIGHT}\")\r\n self.config(background=\"pale turquoise\")\r\n\r\n self.scroll_frame = VerticalScrolledFrame(self)\r\n self.scroll_frame.grid(column=1, row=3)\r\n\r\n self.place_widgets()",
"def __init__(self, parent):\n super(DummyStageInterface, self).__init__(parent)\n\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,\n self.tb_size)\n self.tb.AddLabelTool(wx.ID_OPEN, \"Open\", open_bmp)\n\n self.tb.Realize()",
"def __init__(self, frame):\n self.entry = Entry(frame, width = 1)",
"def __init__(self, master=None, cnf={}, **kw):\n if kw:\n cnf = _cnfmerge((cnf, kw))\n extra = ()\n for wmkey in ['screen', 'class_', 'class', 'visual',\n 'colormap']:\n if wmkey in cnf:\n val = cnf[wmkey]\n # TBD: a hack needed because some keys\n # are not valid as keyword arguments\n if wmkey[-1] == '_': opt = '-'+wmkey[:-1]\n else: opt = '-'+wmkey\n extra = extra + (opt, val)\n del cnf[wmkey]\n BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)\n root = self._root()\n self.iconname(root.iconname())\n self.title(root.title())\n self.protocol(\"WM_DELETE_WINDOW\", self.destroy)",
"def __init__(self, parent=None, name=None, f=0):\n QFrame.__init__(self, parent, name, f)\n \n self.setAcceptDrops(1)\n self.Layout = QHBoxLayout(self,0,0,\"poolLayout\")\n \n self.backplate = QFrame(self, 'backplate')\n self.backplate.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.Layout.addWidget(self.backplate)\n \n self.trackFrames = []",
"def __init__(self, other=None):\r\n\r\n if other:\r\n self.Assign(other)\r\n else:\r\n # window representing the docking target\r\n self.host = None\r\n # dock direction (top, bottom, left, right, center)\r\n self.dock_direction = AUI_DOCK_NONE",
"def __init__(self, parent, rokucontrol=None, is_error=False, error_message=''):\n tk.Frame.__init__(self, parent, background=\"black\")\n self.style = ttk.Style()\n self.parent = parent\n self.rokucontrol = rokucontrol\n\n self.init_ui()\n\n if is_error:\n self.place_error_message(error_message)\n else:\n self.place_buttons()\n self.place_app_buttons()",
"def __init__(self, master=None, margin=30):\r\n Frame.__init__(self, master, padx=margin, pady=margin)\r\n self.grid()\r\n self.widgets()\r\n self.behavior()",
"def __init__( self, a_parent_frame,\n a_title,\n a_title_color,\n button_width = 10,\n button_height = 2 ):\n a_frame = Tk.Frame( a_parent_frame,\n # bg =\"red\",\n bg = \"gray\", )\n\n a_frame.rowconfigure( 0, weight= 1 )\n a_frame.rowconfigure( 1, weight= 1 )\n\n a_frame.columnconfigure( 0, weight= 1 )\n #master.columnconfigure( 1, weight= 1 )\n self.frame = a_frame\n p_frame = a_frame\n\n a_frame = Tk.Frame( p_frame, bg = a_title_color, )\n # padx = 2, pady = 2, relief= Tk.GROOVE, )\n a_frame.grid( row = 0, column = 0 ,sticky = Tk.E + Tk.W )\n self.top_inner_frame = a_frame\n\n a_label = Tk.Label( a_frame,\n text = a_title,\n bg = a_title_color , )\n # relief = RAISED, )\n a_label.grid( row = 0, column = 0, )\n # columnspan = 1, sticky = Tk.W + Tk.E )\n\n a_frame = Tk.Frame( p_frame, )\n # bg = \"blue\", ) # use neutral color or the title color\n # padx = 2, pady = 2, relief= Tk.GROOVE, )\n a_frame.grid( row = 1, column = 0,sticky = Tk.E + Tk.W )\n self.bottom_inner_frame = a_frame\n\n self.button_width = button_width\n self.button_height = button_height\n self.button_row = 0\n self.button_column = 0",
"def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)",
"def __init__(self, frame, width, height):\n \n self.canvas = Tkinter.Canvas(frame, width = int(width), \n height = int(height))\n self.canvas.pack(side = CANVAS[\"POSITION\"])\n self.canvas.configure(background = check_color(CANVAS[\"BACKGROUND_COLOR\"]))",
"def __init__(self, parent, *args, **kwargs):\n tk.LabelFrame.__init__(self, parent, *args, **kwargs)\n self.canvas = MainCanvas(self, bg=\"orange\")\n self.canvas.pack(side='top', fill='both', expand=True)",
"def __init__(self, parent: View):\n self.parent = parent\n self.root = self.parent.root\n # Content frame\n self.frame = tk.Frame(self.parent.frame)\n # Reference\n self.visible = False",
"def __init__(self,*args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.winfo_toplevel().title(\"ElogQP\")\n self.container = tk.Frame(self)\n self.container.pack(side=\"top\", fill=\"both\", expand=True)\n self.container.grid_rowconfigure(0, weight=1)\n self.container.grid_columnconfigure(0, weight=1)\n self.activeFrames = []\n for F in (Frames.frame_start.frame_start, Frames.frame_modules.frame_modules, Frames.frame_showError.frame_showError):\n self.createFrame(F, F.__name__)\n \n self.showFrame(\"frame_start\")",
"def createTopFrame(self):\n \n # g.trace(\"leoTkinterDialog\")\n \n self.root = g.app.root\n \n self.top = Tk.Toplevel(self.root)\n self.top.title(self.title)\n \n if not self.resizeable:\n self.top.resizable(0,0) # neither height or width is resizable.\n \n self.frame = Tk.Frame(self.top)\n self.frame.pack(side=\"top\",expand=1,fill=\"both\")\n \n if not self.canClose:\n self.top.protocol(\"WM_DELETE_WINDOW\", self.onClose)\n \n # Do this at idle time.\n def callback(top=self.top):\n g.app.gui.attachLeoIcon(top)\n \n self.top.after_idle(callback)",
"def __init__(self, screen, *args,\r\n parent=None, callbackOnChange=None,\r\n quitAskMsg=\"Quit? Are you sure?\",\r\n **kwargs,\r\n ):\r\n ## these kwargs will get passed to super, Frame\r\n #dkwargsForSelf = {\r\n # 'frameData':{}\r\n #}\r\n #Easy.DictOfDefaultsOntoObj( self, dkwargsForSelf, kwargs ) \r\n if 'data' not in kwargs:\r\n kwargs['data']={}\r\n \r\n self.defaultLayoutColumnWidths = [1, 18, 1]\r\n self.defaultFooterLayoutColumnWidths = [1, 1, 1]\r\n self.quitAskMsg=quitAskMsg\r\n self.parent=parent,\r\n \r\n self.callbackOnChange = callbackOnChange\r\n super().__init__(screen, *args, **kwargs )\r\n \r\n if not hasattr( self, \"widgets\" ):\r\n self.widgets={}\r\n if not hasattr( self, \"layouts\" ):\r\n self.layouts={}\r\n \r\n self.createLayout('DefaultLayout', self.defaultLayoutColumnWidths)\r\n self.createLayout('DefaultFooterLayout', self.defaultFooterLayoutColumnWidths )",
"def __init__( self, parent_frame, a_title = \"a title\", width = None, height = None ):\n self.version = \"2021_08_04\"\n self.parent = parent_frame\n self.title = a_title\n self.click_function = None # set later or externally\n self.frame = None # the frame this is in use xxx.frame\n self.listbox = None # the listbox\n self.outer_frame = None\n\n if width is None:\n width = 100\n\n if height is None:\n height = 100\n\n self._make_titled_listbox_( width, height )"
] | [
"0.70367366",
"0.6843715",
"0.6642154",
"0.6578007",
"0.654084",
"0.6410062",
"0.6291096",
"0.62042177",
"0.594362",
"0.59111077",
"0.5909552",
"0.5876681",
"0.57657516",
"0.56834096",
"0.56624097",
"0.564891",
"0.56267667",
"0.56264985",
"0.5610823",
"0.55926937",
"0.55911565",
"0.55723584",
"0.5554378",
"0.55081445",
"0.547997",
"0.5417965",
"0.5415419",
"0.5409078",
"0.54044497",
"0.5395316"
] | 0.8233745 | 0 |
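A hedged sketch (not part of the dataset) of how the patched floating-frame constructor from the preceding record could be routed through an `AuiManager`. Overriding `AuiManager.CreateFloatingFrame` is the factory hook shown in the record's negatives; the `SSHSafeFloatingFrame` and `in_ssh_session` names are assumptions standing in for the record's class and for `fwidgets.inSSHSession()`.

```python
import os

import wx
import wx.lib.agw.aui as aui


def in_ssh_session():
    # Stand-in for fwidgets.inSSHSession(); assumption for illustration only.
    return 'SSH_CONNECTION' in os.environ


class SSHSafeFloatingFrame(aui.AuiFloatingFrame):
    def __init__(self, *args, **kwargs):
        # Default style as in the record's document; drop FRAME_TOOL_WINDOW
        # over SSH/X11 so floating panes are not given title bars.
        style = kwargs.get('style',
                           wx.FRAME_TOOL_WINDOW |
                           wx.FRAME_FLOAT_ON_PARENT |
                           wx.FRAME_NO_TASKBAR |
                           wx.CLIP_CHILDREN)
        if in_ssh_session():
            style &= ~wx.FRAME_TOOL_WINDOW
        kwargs['style'] = style
        super().__init__(*args, **kwargs)


class SSHSafeAuiManager(aui.AuiManager):
    def CreateFloatingFrame(self, parent, pane_info):
        # Overriding this factory is the usual way to substitute a custom
        # floating-frame class (compare the CreateFloatingFrame negative above).
        return SSHSafeFloatingFrame(parent, self, pane_info)
```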
I am also monkeypatching the ``wx.lib.agw.aui.AuiDockingGuide.__init__`` method, because in this instance, when running over SSH/X11, the ``wx.FRAME_TOOL_WINDOW`` style seems to result in the docking guide frames being given title bars, which is quite undesirable. I cannot patch the entire class in the aui package, because it is used as part of a class hierarchy. So I am just patching the method. | def _AuiDockingGuide_init(self, *args, **kwargs):
if 'style' in kwargs:
style = kwargs['style']
# This is the default style, as defined
# in the AuiDockingGuide constructor
else:
style = (wx.FRAME_TOOL_WINDOW |
wx.FRAME_STAY_ON_TOP |
wx.FRAME_NO_TASKBAR |
wx.NO_BORDER)
if fwidgets.inSSHSession():
style &= ~wx.FRAME_TOOL_WINDOW
kwargs['style'] = style
_AuiDockingGuide_real_init(self, *args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, parent, direction=0):\r\n\r\n self._direction = direction\r\n\r\n style = wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP | \\\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER\r\n\r\n # Use of FRAME_SHAPED on wxMac causes the frame to be visible\r\n # breaking the docking hints.\r\n if wx.Platform != '__WXMAC__':\r\n style |= wx.FRAME_SHAPED\r\n\r\n AuiDockingGuide.__init__(self, parent, style=style, name=\"auiSingleDockTarget\")\r\n \r\n self.Hide()\r\n\r\n useAero = GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_AERO_DOCKING_GUIDES\r\n useWhidbey = GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES\r\n \r\n self._useAero = useAero or useWhidbey\r\n self._valid = True\r\n \r\n if useAero:\r\n sizeX, sizeY = aeroguideSizeX, aeroguideSizeY\r\n elif useWhidbey:\r\n sizeX, sizeY = whidbeySizeX, whidbeySizeY\r\n else:\r\n sizeX, sizeY = guideSizeX, guideSizeY\r\n\r\n if direction not in [wx.TOP, wx.BOTTOM]:\r\n sizeX, sizeY = sizeY, sizeX\r\n\r\n if self._useAero:\r\n self.CreateShapesWithStyle(useWhidbey)\r\n \r\n if wx.Platform == \"__WXGTK__\":\r\n self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)\r\n else:\r\n self.SetGuideShape()\r\n \r\n self.SetSize(self.region.GetBox().GetSize())\r\n else:\r\n self.SetSize((sizeX, sizeY))\r\n \r\n self.rect = wx.Rect(0, 0, sizeX, sizeY)\r\n\r\n if self._useAero:\r\n useAero = (useWhidbey and [2] or [1])[0]\r\n else:\r\n useAero = 0\r\n \r\n self.target = AuiDockingGuideWindow(self, self.rect, direction, False, useAero)",
"def __init__(self, parent, id=wx.ID_ANY, title=\"\", pos=wx.DefaultPosition,\r\n size=wx.DefaultSize, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER, name=\"AuiDockingGuide\"):\r\n\r\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)",
"def __init__(self, parent):\r\n\r\n AuiDockingGuide.__init__(self, parent, style=wx.FRAME_TOOL_WINDOW | wx.STAY_ON_TOP |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,\r\n name=\"auiCenterDockTarget\")\r\n\r\n self.Hide()\r\n\r\n self.CreateShapesWithStyle()\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n if wx.Platform == \"__WXGTK__\":\r\n self.Bind(wx.EVT_WINDOW_CREATE, self.SetGuideShape)\r\n else:\r\n self.SetGuideShape()\r\n \r\n self.SetSize(self.region.GetBox().GetSize())\r\n\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)",
"def __init__(self, parent, id=wx.ID_ANY, title=\"\", pos=wx.DefaultPosition,\r\n size=wx.Size(1, 1), style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |\r\n wx.FRAME_NO_TASKBAR | wx.NO_BORDER | wx.FRAME_SHAPED,\r\n name=\"auiHintWindow\"):\r\n if wx.Platform == '__WXMAC__' and style & wx.FRAME_SHAPED:\r\n # Having the shaped frame causes the frame to not be visible\r\n # with the transparent style hints.\r\n style -= wx.FRAME_SHAPED\r\n\r\n wx.Frame.__init__(self, parent, id, title, pos, size, style, name=name)\r\n \r\n self._blindMode = False\r\n self.SetBackgroundColour(colourHintBackground)\r\n \r\n # Can't set background colour on a frame on wxMac\r\n # so add a panel to set the colour on.\r\n if wx.Platform == '__WXMAC__':\r\n sizer = wx.BoxSizer(wx.HORIZONTAL)\r\n self.panel = wx.Panel(self)\r\n sizer.Add(self.panel, 1, wx.EXPAND)\r\n self.SetSizer(sizer)\r\n self.panel.SetBackgroundColour(colourHintBackground)\r\n\r\n self.Bind(wx.EVT_SIZE, self.OnSize)",
"def __init__(self, parent, owner_mgr, pane=None, id=wx.ID_ANY, title=\"\",\r\n style=wx.FRAME_TOOL_WINDOW | wx.FRAME_FLOAT_ON_PARENT |\r\n wx.FRAME_NO_TASKBAR | wx.CLIP_CHILDREN):\r\n \r\n if pane and pane.IsResizeable():\r\n style += wx.RESIZE_BORDER\r\n if pane:\r\n self._is_toolbar = pane.IsToolbar()\r\n\r\n self._useNativeMiniframes = False\r\n if AuiManager_UseNativeMiniframes(owner_mgr):\r\n # On wxMac we always use native miniframes\r\n self._useNativeMiniframes = True\r\n style += wx.CAPTION + wx.SYSTEM_MENU\r\n if pane.HasCloseButton():\r\n style += wx.CLOSE_BOX\r\n if pane.HasMaximizeButton():\r\n style += wx.MAXIMIZE_BOX\r\n if pane.HasMinimizeButton():\r\n style += wx.MINIMIZE_BOX\r\n \r\n wx.MiniFrame.__init__(self, parent, id, title, pos=pane.floating_pos,\r\n size=pane.floating_size, style=style, name=\"auiFloatingFrame\")\r\n\r\n self._fly_timer = wx.Timer(self, wx.ID_ANY)\r\n self._check_fly_timer = wx.Timer(self, wx.ID_ANY)\r\n \r\n self.Bind(wx.EVT_CLOSE, self.OnClose)\r\n self.Bind(wx.EVT_SIZE, self.OnSize)\r\n self.Bind(wx.EVT_ACTIVATE, self.OnActivate)\r\n self.Bind(wx.EVT_TIMER, self.OnCheckFlyTimer, self._check_fly_timer)\r\n self.Bind(wx.EVT_TIMER, self.OnFlyTimer, self._fly_timer)\r\n self.Bind(EVT_AUI_FIND_MANAGER, self.OnFindManager)\r\n\r\n if self._useNativeMiniframes:\r\n self.Bind(wx.EVT_MOVE, self.OnMoveEvent)\r\n self.Bind(wx.EVT_MOVING, self.OnMoveEvent)\r\n self.Bind(wx.EVT_IDLE, self.OnIdle)\r\n self._useNativeMiniframes = True\r\n self.SetExtraStyle(wx.WS_EX_PROCESS_IDLE)\r\n else:\r\n self.Bind(wx.EVT_MOVE, self.OnMove)\r\n\r\n self._fly = False\r\n self._send_size = True\r\n self._alpha_amount = 255\r\n \r\n self._owner_mgr = owner_mgr\r\n self._moving = False\r\n self._lastDirection = None\r\n self._transparent = 255\r\n\r\n self._last_rect = wx.Rect()\r\n self._last2_rect = wx.Rect()\r\n self._last3_rect = wx.Rect()\r\n\r\n self._mgr = AuiManager()\r\n self._mgr.SetManagedWindow(self)\r\n self._mgr.SetArtProvider(owner_mgr.GetArtProvider())\r\n self._mgr.SetAGWFlags(owner_mgr.GetAGWFlags())",
"def UpdateDockingGuides(self, paneInfo):\r\n\r\n if len(self._guides) == 0:\r\n self.CreateGuideWindows()\r\n\r\n captionSize = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)\r\n frameRect = GetInternalFrameRect(self._frame, self._docks)\r\n mousePos = wx.GetMousePosition()\r\n\r\n for indx, guide in enumerate(self._guides):\r\n \r\n pt = wx.Point()\r\n guide_size = guide.host.GetSize()\r\n if not guide.host:\r\n raise Exception(\"Invalid docking host\")\r\n\r\n direction = guide.dock_direction\r\n\r\n if direction == AUI_DOCK_LEFT:\r\n pt.x = frameRect.x + guide_size.x / 2 + 16\r\n pt.y = frameRect.y + frameRect.height / 2\r\n\r\n elif direction == AUI_DOCK_TOP:\r\n pt.x = frameRect.x + frameRect.width / 2\r\n pt.y = frameRect.y + guide_size.y / 2 + 16\r\n\r\n elif direction == AUI_DOCK_RIGHT:\r\n pt.x = frameRect.x + frameRect.width - guide_size.x / 2 - 16\r\n pt.y = frameRect.y + frameRect.height / 2\r\n\r\n elif direction == AUI_DOCK_BOTTOM:\r\n pt.x = frameRect.x + frameRect.width / 2\r\n pt.y = frameRect.y + frameRect.height - guide_size.y / 2 - 16\r\n\r\n elif direction == AUI_DOCK_CENTER:\r\n rc = paneInfo.window.GetScreenRect()\r\n pt.x = rc.x + rc.width / 2\r\n pt.y = rc.y + rc.height / 2\r\n if paneInfo.HasCaption():\r\n pt.y -= captionSize / 2\r\n elif paneInfo.HasCaptionLeft():\r\n pt.x -= captionSize / 2\r\n\r\n # guide will be centered around point 'pt'\r\n targetPosition = wx.Point(pt.x - guide_size.x / 2, pt.y - guide_size.y / 2)\r\n\r\n if guide.host.GetPosition() != targetPosition:\r\n guide.host.Move(targetPosition)\r\n \r\n guide.host.AeroMove(targetPosition)\r\n\r\n if guide.dock_direction == AUI_DOCK_CENTER:\r\n guide.host.ValidateNotebookDocking(paneInfo.IsNotebookDockable())\r\n\r\n guide.host.UpdateDockGuide(mousePos)\r\n \r\n paneInfo.window.Lower()",
"def __init__(self):\r\n\r\n object.__init__(self)\r\n \r\n self.dock_direction = 0\r\n self.dock_layer = 0\r\n self.dock_row = 0\r\n self.size = 0\r\n self.min_size = 0\r\n self.resizable = True\r\n self.fixed = False\r\n self.toolbar = False\r\n self.rect = wx.Rect()\r\n self.panes = []",
"def CreateGuideWindows(self):\r\n\r\n self.DestroyGuideWindows()\r\n\r\n self._guides.append(AuiDockingGuideInfo().Left().\r\n Host(AuiSingleDockingGuide(self._frame, wx.LEFT)))\r\n self._guides.append(AuiDockingGuideInfo().Top().\r\n Host(AuiSingleDockingGuide(self._frame, wx.TOP)))\r\n self._guides.append(AuiDockingGuideInfo().Right().\r\n Host(AuiSingleDockingGuide(self._frame, wx.RIGHT)))\r\n self._guides.append(AuiDockingGuideInfo().Bottom().\r\n Host(AuiSingleDockingGuide(self._frame, wx.BOTTOM)))\r\n self._guides.append(AuiDockingGuideInfo().Centre().\r\n Host(AuiCenterDockingGuide(self._frame)))",
"def __init__(self, *args, **kwargs):\n\n if 'style' in kwargs:\n style = kwargs['style']\n\n # This is the default style, as defined\n # in the AuiFloatingFrame constructor\n else:\n style = (wx.FRAME_TOOL_WINDOW |\n wx.FRAME_FLOAT_ON_PARENT |\n wx.FRAME_NO_TASKBAR |\n wx.CLIP_CHILDREN)\n\n if fwidgets.inSSHSession():\n style &= ~wx.FRAME_TOOL_WINDOW\n\n kwargs['style'] = style\n\n super().__init__(*args, **kwargs)",
"def CreateShapesWithStyle(self):\r\n\r\n useAero = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_AERO_DOCKING_GUIDES) != 0\r\n useWhidbey = (GetManager(self.GetParent()).GetAGWFlags() & AUI_MGR_WHIDBEY_DOCKING_GUIDES) != 0\r\n\r\n self._useAero = 0\r\n if useAero:\r\n self._useAero = 1\r\n elif useWhidbey:\r\n self._useAero = 2\r\n \r\n if useAero:\r\n sizeX, sizeY = aeroguideSizeX, aeroguideSizeY\r\n elif useWhidbey:\r\n sizeX, sizeY = whidbeySizeX, whidbeySizeY \r\n else:\r\n sizeX, sizeY = guideSizeX, guideSizeY\r\n\r\n rectLeft = wx.Rect(0, sizeY, sizeY, sizeX)\r\n rectTop = wx.Rect(sizeY, 0, sizeX, sizeY)\r\n rectRight = wx.Rect(sizeY+sizeX, sizeY, sizeY, sizeX)\r\n rectBottom = wx.Rect(sizeY, sizeX + sizeY, sizeX, sizeY)\r\n rectCenter = wx.Rect(sizeY, sizeY, sizeX, sizeX)\r\n \r\n if not self._useAero:\r\n\r\n self.targetLeft = AuiDockingGuideWindow(self, rectLeft, wx.LEFT, True, useAero)\r\n self.targetTop = AuiDockingGuideWindow(self, rectTop, wx.TOP, True, useAero)\r\n self.targetRight = AuiDockingGuideWindow(self, rectRight, wx.RIGHT, True, useAero)\r\n self.targetBottom = AuiDockingGuideWindow(self, rectBottom, wx.BOTTOM, True, useAero)\r\n self.targetCenter = AuiDockingGuideWindow(self, rectCenter, wx.CENTER, True, useAero)\r\n\r\n \r\n # top-left diamond\r\n tld = [wx.Point(rectTop.x, rectTop.y+rectTop.height-8),\r\n wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y),\r\n rectTop.GetBottomLeft()]\r\n # bottom-left diamond\r\n bld = [wx.Point(rectLeft.x+rectLeft.width-8, rectLeft.y+rectLeft.height),\r\n wx.Point(rectBottom.x, rectBottom.y+8),\r\n rectBottom.GetTopLeft()]\r\n # top-right diamond\r\n trd = [wx.Point(rectTop.x+rectTop.width, rectTop.y+rectTop.height-8),\r\n wx.Point(rectRight.x+8, rectRight.y),\r\n rectRight.GetTopLeft()] \r\n # bottom-right diamond\r\n brd = [wx.Point(rectRight.x+8, rectRight.y+rectRight.height),\r\n wx.Point(rectBottom.x+rectBottom.width, rectBottom.y+8),\r\n rectBottom.GetTopRight()]\r\n\r\n self._triangles = [tld[0:2], bld[0:2],\r\n [wx.Point(rectTop.x+rectTop.width-1, rectTop.y+rectTop.height-8),\r\n wx.Point(rectRight.x+7, rectRight.y)],\r\n [wx.Point(rectRight.x+7, rectRight.y+rectRight.height),\r\n wx.Point(rectBottom.x+rectBottom.width-1, rectBottom.y+8)]]\r\n \r\n region = wx.Region()\r\n region.UnionRect(rectLeft)\r\n region.UnionRect(rectTop)\r\n region.UnionRect(rectRight)\r\n region.UnionRect(rectBottom)\r\n region.UnionRect(rectCenter)\r\n region.UnionRegion(wx.RegionFromPoints(tld))\r\n region.UnionRegion(wx.RegionFromPoints(bld))\r\n region.UnionRegion(wx.RegionFromPoints(trd))\r\n region.UnionRegion(wx.RegionFromPoints(brd))\r\n\r\n elif useAero:\r\n\r\n self._aeroBmp = aero_dock_pane.GetBitmap()\r\n region = wx.RegionFromBitmap(self._aeroBmp)\r\n\r\n self._allAeroBmps = [aero_dock_pane_left.GetBitmap(), aero_dock_pane_top.GetBitmap(),\r\n aero_dock_pane_right.GetBitmap(), aero_dock_pane_bottom.GetBitmap(),\r\n aero_dock_pane_center.GetBitmap(), aero_dock_pane.GetBitmap()]\r\n self._deniedBitmap = aero_denied.GetBitmap()\r\n self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]\r\n self._valid = True\r\n\r\n elif useWhidbey:\r\n\r\n self._aeroBmp = whidbey_dock_pane.GetBitmap()\r\n region = wx.RegionFromBitmap(self._aeroBmp)\r\n\r\n self._allAeroBmps = [whidbey_dock_pane_left.GetBitmap(), whidbey_dock_pane_top.GetBitmap(),\r\n whidbey_dock_pane_right.GetBitmap(), whidbey_dock_pane_bottom.GetBitmap(),\r\n whidbey_dock_pane_center.GetBitmap(), whidbey_dock_pane.GetBitmap()]\r\n self._deniedBitmap = 
whidbey_denied.GetBitmap()\r\n self._aeroRects = [rectLeft, rectTop, rectRight, rectBottom, rectCenter]\r\n self._valid = True\r\n \r\n \r\n self.region = region",
"def SetupToolBar( self ):\n tb = self.CreateToolBar( self.TBFLAGS )\n tsize = (24,24)\n tb.ToolBitmapSize = tsize\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize)\n tb.AddLabelTool(ID_OPEN, \"Open\", open_bmp, shortHelp=\"Open\", longHelp=\"Open a (c)Profile trace file\")\n tb.AddSeparator()\n# self.Bind(wx.EVT_TOOL, self.OnOpenFile, id=ID_OPEN)\n self.rootViewTool = tb.AddLabelTool(\n ID_ROOT_VIEW, _(\"Root View\"),\n wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR, tsize),\n shortHelp=_(\"Display the root of the current view tree (home view)\")\n )\n self.rootViewTool = tb.AddLabelTool(\n ID_BACK_VIEW, _(\"Back\"), \n wx.ArtProvider.GetBitmap(wx.ART_GO_BACK, wx.ART_TOOLBAR, tsize),\n shortHelp=_(\"Back to the previously activated node in the call tree\")\n )\n self.upViewTool = tb.AddLabelTool(\n ID_UP_VIEW, _(\"Up\"),\n wx.ArtProvider.GetBitmap(wx.ART_GO_UP, wx.ART_TOOLBAR, tsize),\n shortHelp=_(\"Go one level up the call tree (highest-percentage parent)\")\n )\n tb.AddSeparator()\n # TODO: figure out why the control is sizing the label incorrectly on Linux\n self.percentageViewTool = wx.CheckBox( tb, -1, _(\"Percent \") )\n self.percentageViewTool.SetToolTip( wx.ToolTip(_(\"Toggle display of percentages in list views\")) )\n tb.AddControl( self.percentageViewTool )\n wx.EVT_CHECKBOX( self.percentageViewTool, self.percentageViewTool.GetId(), self.OnPercentageView )\n \n self.packageViewTool = wx.CheckBox( tb, -1, _(\"File View \") )\n self.packageViewTool.SetToolTip( wx.ToolTip(_(\"Switch between call-hierarchy and package/module/function hierarchy\")) )\n tb.AddControl( self.packageViewTool )\n wx.EVT_CHECKBOX( self.packageViewTool, self.packageViewTool.GetId(), self.OnPackageView )\n tb.Realize()",
"def __init__(self, parent):\n super(DummyStageInterface, self).__init__(parent)\n\n open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR,\n self.tb_size)\n self.tb.AddLabelTool(wx.ID_OPEN, \"Open\", open_bmp)\n\n self.tb.Realize()",
"def __init__(self, master=None, margin=30):\r\n Frame.__init__(self, master, padx=margin, pady=margin)\r\n self.grid()\r\n self.widgets()\r\n self.behavior()",
"def __init__(self, parent):\r\n\r\n pre = wx.PrePyWindow()\r\n \r\n self._tabs = None\r\n self._rect = wx.Rect(0, 0, 200, 200)\r\n self._tab_ctrl_height = 20\r\n self._tab_rect = wx.Rect() \r\n self._parent = parent\r\n \r\n self.PostCreate(pre)",
"def __init__(self, parent, rect, direction=0, center=False, useAero=False):\r\n\r\n wx.Window.__init__(self, parent, -1, rect.GetPosition(), rect.GetSize(), wx.NO_BORDER)\r\n\r\n self._direction = direction\r\n self._center = center\r\n self._valid = True\r\n self._useAero = useAero\r\n \r\n self._bmp_unfocus, self._bmp_focus = GetDockingImage(direction, useAero, center)\r\n \r\n self._currentImage = self._bmp_unfocus\r\n self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)\r\n \r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)",
"def _create_toolbar ( self, parent, sizer ):\r\n factory = self.factory\r\n if not factory.show_toolbar:\r\n return",
"def __init__(self, parent, id, title):\n wx.Frame.__init__(self, parent, id, title, size=(1200, 560))\n self.SetBackgroundColour(wx.Colour(0, 0, 360))\n self.SetIcon(wx.Icon(gv.ICO_PATH))\n gv.iGeoMgr = self.geoMgr = GeoMgr(self)\n gv.iDCPosMgr = DataCenterMgr(self)\n self.SetSizer(self._buidUISizer())",
"def init_widget(self):\n super(WxDockPane, self).init_widget()\n d = self.declaration\n self.set_title(d.title)\n self.set_title_bar_visible(d.title_bar_visible)\n self.set_title_bar_orientation(d.title_bar_orientation)\n self.set_closable(d.closable)\n self.set_movable(d.movable)\n self.set_floatable(d.floatable)\n self.set_floating(d.floating)\n self.set_dock_area(d.dock_area)\n self.set_allowed_dock_areas(d.allowed_dock_areas)\n widget = self.widget\n widget.Bind(EVT_DOCK_PANE_FLOATED, self.on_floated)\n widget.Bind(EVT_DOCK_PANE_DOCKED, self.on_docked)\n widget.Bind(EVT_DOCK_PANE_CLOSED, self.on_closed)",
"def __init__(self, parent):\r\n Frame.__init__(self, parent) \r\n \r\n self.parent = parent\r\n self.initUI()",
"def Show(self, show=True):\r\n \r\n super(AuiDockingHintWindow, self).Show(show)\r\n if wx.Platform == '__WXMAC__':\r\n # Need to manually do layout since its a borderless frame.\r\n self.Layout()",
"def init_toolbar(self):\n raise NotImplementedError",
"def InitNotebook(self, agwStyle):\r\n\r\n self.SetName(\"AuiNotebook\")\r\n self._agwFlags = agwStyle\r\n\r\n self._popupWin = None\r\n self._naviIcon = None\r\n self._imageList = None\r\n self._last_drag_x = 0\r\n \r\n self._normal_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)\r\n self._selected_font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)\r\n self._selected_font.SetWeight(wx.BOLD)\r\n\r\n self.SetArtProvider(TA.AuiDefaultTabArt())\r\n\r\n self._dummy_wnd = wx.Window(self, wx.ID_ANY, wx.Point(0, 0), wx.Size(0, 0))\r\n self._dummy_wnd.SetSize((200, 200))\r\n self._dummy_wnd.Show(False)\r\n\r\n self._mgr.SetManagedWindow(self)\r\n self._mgr.SetAGWFlags(AUI_MGR_DEFAULT)\r\n self._mgr.SetDockSizeConstraint(1.0, 1.0) # no dock size constraint\r\n\r\n self._mgr.AddPane(self._dummy_wnd, framemanager.AuiPaneInfo().Name(\"dummy\").Bottom().CaptionVisible(False).Show(False))\r\n self._mgr.Update()\r\n\r\n self.Bind(wx.EVT_SIZE, self.OnSize)\r\n self.Bind(wx.EVT_CHILD_FOCUS, self.OnChildFocusNotebook)\r\n self.Bind(EVT_AUINOTEBOOK_PAGE_CHANGING, self.OnTabClicked,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_BEGIN_DRAG, self.OnTabBeginDrag,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_END_DRAG, self.OnTabEndDrag,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_DRAG_MOTION, self.OnTabDragMotion,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_CANCEL_DRAG, self.OnTabCancelDrag,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500) \r\n self.Bind(EVT_AUINOTEBOOK_BUTTON, self.OnTabButton,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_TAB_MIDDLE_DOWN, self.OnTabMiddleDown,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_TAB_MIDDLE_UP, self.OnTabMiddleUp,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_TAB_RIGHT_DOWN, self.OnTabRightDown,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_TAB_RIGHT_UP, self.OnTabRightUp,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_BG_DCLICK, self.OnTabBgDClick,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n self.Bind(EVT_AUINOTEBOOK_TAB_DCLICK, self.OnTabDClick,\r\n id=AuiBaseTabCtrlId, id2=AuiBaseTabCtrlId+500)\r\n\r\n self.Bind(wx.EVT_NAVIGATION_KEY, self.OnNavigationKeyNotebook)",
"def _InitUI( self, two_axes = False ):\n dpis = wx.ScreenDC().GetPPI()\n size = ( WIDGET_PREF_SIZE[ 0 ] / dpis[ 0 ], WIDGET_PREF_SIZE[ 1 ] / dpis[ 0 ] )\n self.fig = Figure( facecolor = '#ececec', figsize = size, dpi = dpis[ 0 ] )\n\n self._InitAxes()\n# if two_axes:\n# self.ax = self.fig.add_axes([ 0.1, 0.1, 0.85, 0.65 ])\n# self.ax2 = self.ax.twiny()\n# else:\n# self.ax = self.fig.add_subplot( 111 )\n self.canvas = FigureCanvas( self, -1, self.fig )\n self.canvas.SetMinClientSize( wx.Size( 200, 200 ) )\n self.toolbar = NavigationToolbar( self.canvas )\n #self.toolbar.Realize()\n self.toolbar.SetBackgroundColour( wx.Colour( 236, 236, 236, 255 ) )\n self.toolbar.Show( False )\n\n sizer = wx.BoxSizer( wx.VERTICAL )\n sizer.Add( self.toolbar, 0, wx.LEFT | wx.TOP | wx.BOTTOM | wx.EXPAND, 1 )\n sizer.Add( self.canvas, 1, wx.LEFT | wx.TOP | wx.BOTTOM | wx.EXPAND, 1 )\n self.SetSizer( sizer )\n\n self.callbackIds[ 'button_release_event' ] = \\\n self.canvas.mpl_connect( 'button_release_event', self._OnMplMouseRelease )\n self.callbackIds[ 'motion_notify_event' ] = \\\n self.canvas.mpl_connect( 'motion_notify_event', self._OnMplMouseMotion )\n\n self.Bind( wx.EVT_CLOSE, self._OnClose )\n self.Bind( wx.EVT_CONTEXT_MENU, self._OnContextMenu )\n self.Bind( wx.EVT_SIZE, self._OnSize )\n\n self.timer = wx.Timer( self, TIMERID_RESIZE )\n self.Bind( wx.EVT_TIMER, self._OnTimer )",
"def dockControl(*args, allowedArea: Union[AnyStr, List[AnyStr], bool]=\"all\", annotation:\n Union[AnyStr, bool]=\"\", area: Union[AnyStr, bool]=\"\", backgroundColor:\n Union[List[float, float, float], bool]=None, closeCommand: Script=None,\n content: Union[AnyStr, bool]=\"\", defineTemplate: AnyStr=\"\", docTag:\n Union[AnyStr, bool]=\"\", dockStation: AnyStr=\"\", dragCallback: Script=None,\n dropCallback: Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, enablePopupOption: bool=True, exists: bool=True,\n fixedHeight: bool=True, fixedWidth: bool=True, floatChangeCommand: Script=None,\n floating: bool=True, fullPathName: bool=True, height: Union[int, bool]=0,\n highlightColor: Union[List[float, float, float], bool]=None, isObscured:\n bool=True, label: Union[AnyStr, bool]=\"\", manage: bool=True, moveable:\n bool=True, noBackground: bool=True, numberOfPopupMenus: bool=True, parent:\n Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride: bool=True,\n r: bool=True, retain: bool=True, sizeable: bool=True, splitLayout: AnyStr=\"\",\n state: Union[AnyStr, bool]=\"\", statusBarMessage: AnyStr=\"\", useTemplate:\n AnyStr=\"\", visible: bool=True, visibleChangeCommand: Union[Script, bool]=None,\n width: Union[int, bool]=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass",
"def init_layout(self):\n super(WxDockPane, self).init_layout()\n self.widget.SetDockWidget(self.dock_widget())",
"def ToolbarPane(self):\r\n \r\n self.DefaultPane()\r\n state = self.state\r\n \r\n state |= (self.optionToolbar | self.optionGripper)\r\n state &= ~(self.optionResizable | self.optionCaption | self.optionCaptionLeft)\r\n \r\n if self.dock_layer == 0:\r\n self.dock_layer = 10\r\n\r\n self.state = state\r\n \r\n return self",
"def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize,\r\n style=wx.NO_BORDER|wx.WANTS_CHARS|wx.TAB_TRAVERSAL):\r\n\r\n wx.PyControl.__init__(self, parent, id, pos, size, style, name=\"AuiTabCtrl\")\r\n AuiTabContainer.__init__(self, parent)\r\n\r\n self._click_pt = wx.Point(-1, -1)\r\n self._is_dragging = False\r\n self._hover_button = None\r\n self._pressed_button = None\r\n self._drag_image = None\r\n self._drag_img_offset = (0, 0)\r\n self._on_button = False\r\n \r\n self.Bind(wx.EVT_PAINT, self.OnPaint)\r\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)\r\n self.Bind(wx.EVT_SIZE, self.OnSize)\r\n self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)\r\n self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)\r\n self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)\r\n self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleDown)\r\n self.Bind(wx.EVT_MIDDLE_UP, self.OnMiddleUp)\r\n self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)\r\n self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)\r\n self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)\r\n self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)\r\n self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)\r\n self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnCaptureLost)\r\n self.Bind(wx.EVT_MOTION, self.OnMotion)\r\n self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)\r\n self.Bind(EVT_AUINOTEBOOK_BUTTON, self.OnButton)",
"def UpdateDockGuide(self, pos):\r\n\r\n self.target.UpdateDockGuide(pos)",
"def toolBar(*args, allowedArea: Union[AnyStr, List[AnyStr], bool]=\"all\", annotation:\n Union[AnyStr, bool]=\"\", area: Union[AnyStr, bool]=\"\", backgroundColor:\n Union[List[float, float, float], bool]=None, content: Union[AnyStr, bool]=\"\",\n defineTemplate: AnyStr=\"\", docTag: Union[AnyStr, bool]=\"\", dragCallback:\n Script=None, dropCallback: Script=None, enable: bool=True, enableBackground:\n bool=True, enableKeyboardFocus: bool=True, exists: bool=True, fullPathName:\n bool=True, height: Union[int, bool]=0, highlightColor: Union[List[float, float,\n float], bool]=None, isObscured: bool=True, label: Union[AnyStr, bool]=\"\", manage:\n bool=True, noBackground: bool=True, numberOfPopupMenus: bool=True, parent:\n Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride: bool=True,\n statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible: bool=True,\n visibleChangeCommand: Union[Script, bool]=None, width: Union[int, bool]=0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def __init__(self, parent, id):\n \n # init frame\n wx.Frame.__init__(self, parent, -1, \"Papyrus\", size=(800, 500), style=wx.DEFAULT_FRAME_STYLE | wx.NO_FULL_REPAINT_ON_RESIZE)\n \n # init error handler\n sys.excepthook = self._on_error\n \n # init library\n self._library = None\n \n # set icon\n icons = wx.IconBundle()\n icons.AddIcon(images.APP_ICON_16)\n icons.AddIcon(images.APP_ICON_32)\n icons.AddIcon(images.APP_ICON_48)\n icons.AddIcon(images.APP_ICON_128)\n icons.AddIcon(images.APP_ICON_256)\n self.SetIcons(icons)\n \n # init menu bar\n self._menu_bar = MenuBar()\n if config.SETTINGS['menu_bar_enabled']:\n self.SetMenuBar(self._menu_bar)\n \n # init main ui\n self._make_ui()\n \n # set size\n self.SetSize((config.SETTINGS['app_width'], config.SETTINGS['app_height']))\n self.SetMinSize((800, 500))\n \n # maximize\n if config.SETTINGS['app_maximized']:\n print(config.SETTINGS)\n self.Maximize()\n \n # bind events\n self._bind_events()\n \n # set hot keys\n self.SetAcceleratorTable(wx.AcceleratorTable(ACCELERATORS))\n \n # show frame\n self.Layout()\n self.Centre(wx.BOTH)\n self.Show(True)"
] | [
"0.78029877",
"0.76468927",
"0.75003105",
"0.67746687",
"0.67715925",
"0.65943986",
"0.65633965",
"0.652213",
"0.63551587",
"0.6119899",
"0.5991007",
"0.5870099",
"0.58692646",
"0.5821341",
"0.5789795",
"0.57829094",
"0.57701516",
"0.5749214",
"0.5741523",
"0.572504",
"0.57011175",
"0.56816614",
"0.5679046",
"0.56594783",
"0.5638076",
"0.5620906",
"0.56022036",
"0.5546945",
"0.5544494",
"0.55288696"
] | 0.8088298 | 0 |
Generate adversarial examples via CW optimization. | def make_cw(env, X_data, epochs=50, eps=0.1, batch_size=1):
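    # Editor's note (assumption): `env` is expected to expose the TensorFlow graph
    # handles used below -- x_fixed, adv_eps, adv_y, noise, adv_train_op, xadv --
    # together with an open session `env.sess`; `Timer`, `n_classes` and numpy
    # (`np`) are assumed to come from the enclosing module. The loop simply re-runs
    # a pre-built CW-style optimizer for each batch.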
print('\nMaking adversarials via CW')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_adv = np.empty_like(X_data)
for batch in range(n_batch):
with Timer('Batch {0}/{1} '.format(batch + 1, n_batch)):
end = min(n_sample, (batch+1) * batch_size)
start = end - batch_size
feed_dict = {
env.x_fixed: X_data[start:end],
env.adv_eps: eps,
env.adv_y: np.random.choice(n_classes)}
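            # (Editor's note: the attack target adv_y is drawn uniformly at random
            # for each batch via np.random.choice(n_classes), so every batch is
            # pushed toward a randomly chosen class.)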
            # reset the adversarial noise variable before attacking each new batch
env.sess.run(env.noise.initializer)
for epoch in range(epochs):
env.sess.run(env.adv_train_op, feed_dict=feed_dict)
xadv = env.sess.run(env.xadv, feed_dict=feed_dict)
X_adv[start:end] = xadv
return X_adv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def optimize(w, b, X, Y, num_iterations,learning_rate,print_cost = False):\n costs = []\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads,cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training examples\n if print_cost and i%100==0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n\n params = {\n \"w\":w,\n \"b\":b\n }\n grads = {\n \"dw\":dw,\n \"db\":db\n }\n return params,grads,costs",
"def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):\n costs = []\n\n for i in range(num_iterations):\n\n grads, cost = propagate(w, b, X, Y)\n\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n w = w - learning_rate * dw\n b = b - learning_rate * db\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training examples\n if print_cost and i % 100 == 0:\n print(\"Cost after iteration %i: %f\" % (i, cost))\n\n params = {\"w\": w,\n \"b\": b}\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return params, grads, costs",
"def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n\n costs = []\n\n for i in range(num_iterations):\n\n grads, cost = propagate(w, b, X, Y)\n\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n w = w - learning_rate * dw\n b = b - learning_rate * db\n\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n\n params = {\"w\": w,\n \"b\": b}\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return params, grads, costs",
"def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n costs = []\n for i in range(num_iterations):\n # Cost and gradient calculation \n grads, cost = propagate(w, b, X, Y)\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n # update rule (≈ 2 lines of code)\n w = w-learning_rate*dw\n b = b-learning_rate*db\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n # Print the cost every 100 training examples\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n params = {\"w\": w,\n \"b\": b}\n grads = {\"dw\": dw,\n \"db\": db}\n return params, grads, costs",
"def __init__(self,\n weight_decay,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n use_locking=False,\n name=\"AdamW\"):\n super(AdamWOptimizer, self).__init__(\n weight_decay,\n learning_rate=learning_rate,\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n use_locking=use_locking,\n name=name)",
"def optimize(w, b, X, Y, num_iterations=100, learning_rate=0.009, print_cost=False):\n w = copy.deepcopy(w)\n b = copy.deepcopy(b)\n\n costs = []\n\n for i in range(num_iterations):\n grads, cost = propagate(w, b, X, Y)\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n w = w - learning_rate * dw\n b = b - learning_rate * db\n\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print(\"Cost after iteration %i: %f\" % (i, cost))\n\n params = {\"w\": w, \"b\": b}\n\n grads = {\"dw\": dw, \"db\": db}\n\n return params, grads, costs",
"def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation \n grads, cost = propagate(w, b, X, Y)\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (Gradient Descent)\n w = w - learning_rate * dw\n b = b - learning_rate * db\n\n # Record the change of costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs",
"def main():\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('--in', type=str, dest=\"input\",\n required=True,\n help=\"Input audio .wav file, at 16KHz\")\n parser.add_argument('--target', type=str,\n required=True,\n help=\"Target transcription\")\n parser.add_argument('--out', type=str,\n required=True,\n help=\"Where to put the adversarial example\")\n args = parser.parse_args()\n with tf.Session() as sess:\n audios = []\n lengths = []\n\n # Just load one input\n for i in range(1):\n fs, audio = wav.read(args.input)\n assert fs == 16000\n print('source dB', 20*np.log10(np.max(np.abs(audio))))\n audios.append(list(audio))\n lengths.append(len(audio))\n audios = np.array(audios)\n maxlen = len(audios[0])\n\n phrase = args.target\n\n # Set up the attack class and run it\n attack = Attack(sess, 'CTC', len(phrase), maxlen, batch_size=len(audios))\n deltas = attack.attack(audios,\n lengths,\n [[toks.index(x) for x in phrase]]*len(audios))\n # And now save it to the desired output\n for i in range(1):\n wav.write(args.out, 16000,\n np.array(np.clip(np.round(deltas[i][:lengths[i]]),\n -2**15, 2**15-1),dtype=np.int16))",
"def optimize(w, b, x, y, num_iterations, learning_rate, print_cost=False):\n\n costs = []\n\n for i in range(num_iterations):\n\n # Cost and gradient calculation\n grads, cost = propagate(w, b, x, y)\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # Update rule\n w = w - (dw * learning_rate)\n b = b - (db * learning_rate)\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print(\"Cost after iteration %i: %f\" % (i, cost))\n\n params = {\"w\": w,\n \"b\": b}\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return params, grads, costs",
"def create_optimizer(self, context, optimizer, host):\n pass",
"def nnObjFunction(params, *args):\r\n \r\n\r\n \r\n n_input, n_hidden, n_class, training_data, training_label, lambdaval = args\r\n w1 = params[0:n_hidden * (n_input + 1)].reshape( (n_hidden, (n_input + 1)))\r\n w2 = params[(n_hidden * (n_input + 1)):].reshape((n_class, (n_hidden + 1)))\r\n\r\n\r\n rowsToChange=xrange(len(training_label))\r\n \r\n oneKEncoding = np.zeros((len(training_label),10))\r\n \r\n for x,y in izip(rowsToChange,training_label):\r\n oneKEncoding[x,int(y)]=1\r\n \r\n training_label=oneKEncoding\r\n\r\n trans_w1=w1.T\r\n trans_w2=w2.T\r\n \r\n #add bias 1\r\n x=np.column_stack((training_data,np.ones(len(training_data))))\r\n #equation1\r\n eq1=np.dot(x,trans_w1)\r\n #equation 2\r\n z=sigmoid(eq1)\r\n #add bias 1\r\n z=np.column_stack((z,np.ones(len(z))))\r\n #equation 3\r\n eq3=np.dot(z,trans_w2)\r\n #equation 4\r\n o=sigmoid(eq3)\r\n\r\n #-----------------------------------------Calculations for gradient weight vector 2---------------------------------------------\r\n \r\n delta=np.subtract(o,training_label)\r\n eq5=np.sum(np.square(delta))\r\n\r\n dabba=(training_label-o)*(1-o)*o\r\n \r\n grad_w2=np.multiply(-1,np.dot(dabba.T,z)) \r\n \r\n\r\n #-----------------------------------------Calculations for gradient weight vector 1---------------------------------------------\r\n\r\n one_minus_z_into_z = (1-z)*z\r\n \r\n \r\n multiply_by_summation = one_minus_z_into_z*np.dot(dabba,w2)\r\n \r\n grad_w1_without_minus_one = np.dot(np.transpose(multiply_by_summation),x)\r\n \r\n\r\n grad_w1=np.multiply(-1,grad_w1_without_minus_one)\r\n \r\n grad_w1 = np.delete(grad_w1, n_hidden,0) \r\n \r\n\r\n #-----------------------------------------Calculations for gradient object value----------------------------------------\r\n\r\n \r\n obj_val=eq5/len(training_data)\r\n \r\n #-----------------------------------------Regularization of gradient val and weight vector-------------------------------\r\n \r\n obj_val = obj_val+ (lambdaval/(2*len(training_data)))*( np.sum(np.square(w1)) + np.sum(np.square(w2)))\r\n grad_w2 = (grad_w2 + lambdaval*w2 )/ len(training_data) \r\n grad_w1 = (grad_w1 + lambdaval*w1 )/ len(training_data) \r\n \r\n \r\n\r\n #-----------------------------------------Concatenate both the weight vectors---------------------------------------------\r\n\r\n obj_grad = np.array([])\r\n obj_grad = np.concatenate((grad_w1.flatten(), grad_w2.flatten()),0)\r\n return (obj_val,obj_grad)",
"def create_model_optimizer(net,alpha):\n optimizer = chainer.optimizers.Adam(alpha=alpha)\n optimizer.setup(net)\n return optimizer",
"def build_model(config, src_field, trg_field, device):\n enc = Encoder(len(src_field.vocab), \n config.EMBED, \n config.HIDDEN, \n config.ENC_N_LAYER, \n layernorm=config.L_NORM, \n bidirec=True).to(device)\n\n dec = Decoder(len(trg_field.vocab), \n config.EMBED, \n enc.n_direction*config.HIDDEN, \n config.DEC_N_LAYER, \n drop_rate=config.DROP_RATE, \n method=config.METHOD, \n layernorm=config.L_NORM, \n sos_idx=trg_field.vocab.stoi['<s>'],\n teacher_force=config.TF, \n return_w=config.RETURN_W, \n device=device).to(device)\n \n loss_function = nn.CrossEntropyLoss(ignore_index=trg_field.vocab.stoi['<pad>'])\n if config.OPTIM.lower() == 'adam':\n enc_optimizer = optim.Adam(enc.parameters(), \n lr=config.LR, \n weight_decay=config.LAMBDA)\n dec_optimizer = optim.Adam(dec.parameters(), \n lr=config.LR * config.DECLR, \n weight_decay=config.LAMBDA)\n elif config.OPTIM.lower() == 'adelta':\n enc_optimizer = optim.Adadelta(enc.parameters(),\n weight_decay=config.LAMBDA)\n dec_optimizer = optim.Adadelta(dec.parameters(),\n weight_decay=config.LAMBDA)\n elif config.OPTIM.lower() == 'sgd':\n enc_optimizer = optim.SGD(enc.parameters(),\n lr=config.LR,\n weight_decay=config.LAMBDA)\n dec_optimizer = optim.SGD(dec.parameters(),\n lr=config.LR * config.DECLR,\n weight_decay=config.LAMBDA)\n enc_scheduler = optim.lr_scheduler.MultiStepLR(gamma=0.1,\n milestones=[int(config.STEP / 4),\n int(2 * config.STEP / 3)],\n optimizer=enc_optimizer)\n dec_scheduler = optim.lr_scheduler.MultiStepLR(gamma=0.1,\n milestones=[int(config.STEP / 4), \n int(2 * config.STEP / 3)],\n optimizer=dec_optimizer)\n print(\"Building Model ...\")\n return enc, dec, loss_function, enc_optimizer, dec_optimizer, enc_scheduler, dec_scheduler",
"def apply_dw(self, dw):\n\n # list of trainable params\n param_names = [\"w_out\", \"b_out\", \"w_link\", \"w_in\", \"b_in\"]\n\n for param_name in param_names:\n self.__dict__[param_name] = self.__getattribute__(\n param_name) - LEARNING_RATE*dw[\"d\" + param_name]",
"def __init__(self,\n weight_decay,\n learning_rate,\n momentum,\n use_locking=False,\n name=\"MomentumW\",\n use_nesterov=False):\n super(MomentumWOptimizer, self).__init__(\n weight_decay,\n learning_rate=learning_rate,\n momentum=momentum,\n use_locking=use_locking,\n name=name,\n use_nesterov=use_nesterov)",
"def __init__(self, nh, nc, ne, de, cs, normal=True, longdependence=False,optimization=None):\n ### emb the embedding matrix for all the vocabulary\n self.emb = theano.shared(name='embeddings',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (ne+1, de))\n # add one for padding at the end\n .astype(theano.config.floatX))\n ### weight for input \n self.wxi = theano.shared(name='wxi',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX))\n self.wxf = theano.shared(name='wxf',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX))\n self.wxc = theano.shared(name='wxc',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX)) \n self.wxo = theano.shared(name='wxo',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (de * cs, nh))\n .astype(theano.config.floatX)) \n \n ### weight for t-1 hidden layer \n self.whi = theano.shared(name='whi',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX))\n self.whf = theano.shared(name='whf',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX)) \n self.whc = theano.shared(name='whc',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX))\n self.who = theano.shared(name='who',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nh))\n .astype(theano.config.floatX)) \n \n ### weight for memeory cell [diagonal matrix]\n ## Initialization requirement : initialize as the diagonal matrix. becuase this the proportion of\n ## the memory from history/current state for each neuron. \n self.wci = theano.shared(name='wci',\n value= 0.2 * numpy.diag(numpy.random.uniform(0.0, 1.0,\n nh))\n .astype(theano.config.floatX))\n self.wcf = theano.shared(name='wcf',\n value= 0.2 * numpy.diag(numpy.random.uniform(0.0, 1.0,\n nh))\n .astype(theano.config.floatX))\n self.wco = theano.shared(name='wco',\n value= 0.2 * numpy.diag(numpy.random.uniform(0.0, 1.0,\n nh))\n .astype(theano.config.floatX))\n \n ### weight for the output layer \n self.w = theano.shared(name='w',\n value=0.2 * numpy.random.uniform(-1.0, 1.0,\n (nh, nc))\n .astype(theano.config.floatX))\n \n ### bias\n self.bi = theano.shared(name='bi',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.bf = theano.shared(name='bf',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.bc = theano.shared(name='bc',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.bo = theano.shared(name='bo',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.b = theano.shared(name='b',\n value=numpy.zeros(nc,\n dtype=theano.config.floatX))\n \n ### Initialization for recurrence\n self.h0 = theano.shared(name='h0',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n self.c0 = theano.shared(name='c0',\n value=numpy.zeros(nh,\n dtype=theano.config.floatX))\n \n # bundle\n self.params = [self.wxi,self.wxf,self.wxc,self.wxo ,\\\n self.whi,self.whf,self.whc,self.who ,\\\n self.wco,\\\n self.bi,self.bf,self.bc,self.bo ,\\\n self.w,self.b, self.h0,self.c0]\n \n # word embeding: use vector of [de] to represent each wrod [trained parameter]\n idxs = T.imatrix()\n x = self.emb[idxs].reshape((idxs.shape[0], de*cs)) ## flatten the matrix in to (n_step,dimension * context widnow)\n y_sentence = T.ivector('y_sentence') # labels [ column ]\n \n ################################## Different recurrent Method #############################\n ########################### AR + Bi-direction 
LSTM + Attention + None #####################\n if longdependence=='AR': \n ## goal: give longer dependence on the otuput sequence, by directly using the previous hidden layer \n ## value on the output layer, in addition to the hidden layer. [Ar[1]-lstm + lag_Moving[3]]\n self.war0 = theano.shared(name='w0',\n value=numpy.diag(numpy.ones(nc))\n .astype(theano.config.floatX))\n self.war1 = theano.shared(name='w1',\n value=numpy.zeros((nc, nc))\n .astype(theano.config.floatX)) \n self.war2 = theano.shared(name='w2',\n value=numpy.zeros((nc, nc))\n .astype(theano.config.floatX))\n \n self.params = [self.wxi,self.wxf,self.wxc,self.wxo ,\\\n self.whi,self.whf,self.whc,self.who ,\\\n self.wco,\\\n self.bi,self.bf,self.bc,self.bo ,\\\n self.w,self.b, self.h0,self.c0,\\\n self.war0,self.war1,self.war2] \n \n def recurrence(x_t,h_tm1,h_tm2,c_tm1):\n i_t = T.nnet.sigmoid( T.dot(x_t, self.wxi) + T.dot(h_tm1, self.whi) + T.dot(c_tm1, self.wci) + self.bi )\n f_t = T.nnet.sigmoid( T.dot(x_t, self.wxf) + T.dot(h_tm1, self.whf) + T.dot(c_tm1, self.wcf) + self.bf )\n \n c_t = T.tanh(T.dot(x_t, self.wxc) + T.dot(h_tm1, self.whc) + self.bc)\n c_t = f_t * c_tm1+ i_t * c_t\n \n o_t = T.nnet.sigmoid( T.dot(x_t, self.wxo) + T.dot(h_tm1 , self.who) + T.dot(c_t, self.wco) + self.bo )\n \n h_t = o_t * T.tanh(c_t) \n ## change dimension from nh to nc\n p_t_0 = T.dot(h_t, self.w)\n p_t_1 = T.dot(h_tm1, self.w) \n p_t_2 = T.dot(h_tm2, self.w) \n\n ## compute output label dependency from history output\n q_t_0 = T.dot(p_t_0,self.war0) \n q_t_1 = T.dot(p_t_1,self.war1) \n q_t_2 = T.dot(p_t_2,self.war2) \n\n ## incorporate moving average\n s_t = self.b + q_t_0 + q_t_1 +q_t_2\n return [h_t,h_tm1,c_t,s_t]\n \n [h,_,c,s], _ = theano.scan(fn=recurrence,\n sequences=x,\n outputs_info=[self.h0,self.h0,self.c0, None], \n n_steps=x.shape[0]) \n s = T.nnet.softmax(s) \n p_y_given_x_sentence = s\n \n else:\n def recurrence(x_t, h_tm1,c_tm1):\n i_t = T.nnet.sigmoid( T.dot(x_t, self.wxi) + T.dot(h_tm1, self.whi) + T.dot(c_tm1, self.wci) + self.bi )\n f_t = T.nnet.sigmoid( T.dot(x_t, self.wxf) + T.dot(h_tm1, self.whf) + T.dot(c_tm1, self.wcf) + self.bf )\n \n c_t = T.tanh(T.dot(x_t, self.wxc) + T.dot(h_tm1, self.whc) + self.bc)\n c_t = f_t * c_tm1+ i_t * c_t\n \n o_t = T.nnet.sigmoid( T.dot(x_t, self.wxo) + T.dot(h_tm1, self.who) + T.dot(c_t, self.wco) + self.bo )\n \n h_t = o_t * T.tanh(c_t) \n s_t = T.dot(h_t, self.w) + self.b\n return [h_t, c_t ,s_t]\n\n #shape h[x.shape[0],nh],s[x.shape[0],1,nc]\n [h,c,s], _ = theano.scan(fn=recurrence,\n sequences=[x],\n outputs_info=[self.h0,self.c0, None], \n n_steps=x.shape[0])\n\n s = T.nnet.softmax(s)\n p_y_given_x_sentence = s\n\n ## get the highest probability\n y_pred = T.argmax(p_y_given_x_sentence, axis=1)\n\n # cost and gradients and learning rate\n lr = T.scalar('lr')\n\n sentence_nll = -T.mean(T.log(p_y_given_x_sentence)\n [T.arange(x.shape[0]), y_sentence])\n \n ## used for SGD\n sentence_gradients = T.grad(sentence_nll, self.params)\n if optimization ==None:\n sentence_updates = OrderedDict((p, p - lr*g)\n for p, g in\n zip(self.params, sentence_gradients))\n elif optimization =='Adagrad':\n sentence_updates = OrderedDict(adagrad(params=self.params, gparams=sentence_gradients, learning_rate = lr, epsilon = 1e-6))\n else:\n pass\n \n self.normalize = theano.function(inputs=[],\n updates={self.emb:\n self.emb /\n T.sqrt((self.emb**2)\n .sum(axis=1))\n \n .dimshuffle(0, 'x')})\n self.normal = normal\n\n ## add momentum and ada\n self.classify = theano.function(inputs=[idxs], 
outputs=y_pred)\n self.sentence_train = theano.function(inputs=[idxs, y_sentence, lr],\n outputs=sentence_nll,\n updates=sentence_updates)",
"def propagate(w, b, X, Y):\n m = X.shape[1]\n z = np.dot(w.T, X) + b\n A = sigmoid(z)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X, (A - Y).T)\n db = 1 / m * np.sum(A - Y)\n cost = np.squeeze(np.array(cost))\n grads = {\"dw\": dw, \"db\": db}\n\n return grads, cost",
"def propagate(w, b, X, Y):\n\n m = X.shape[1]\n\n A = sigmoid(np.dot(w.T, X) + b)\n cost = (-1 / X.shape[1]) * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n\n dw = (1 / X.shape[1]) * np.dot(X, (A - Y).T)\n db = (1 / X.shape[1]) * np.sum(A - Y)\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return grads, cost",
"def add_optimizers_to_graph(self):\n with tf.device(self.params.device):\n with self.graph.as_default():\n with tf.compat.v1.variable_scope(\"optimizers\") as scope:\n self.grads_and_vars = list() # [sch_idx][weight_idx]\n self.apply_grads = list() # [sch_idx][weight_idx]\n self.learning_rates = list() # [sch_idx][weight_idx]\n if self.params.optimizer == \"lbfgsb\":\n self.minimizer = None\n #self.minimizer = tfp.optimizer.lbfgs_minimize(\n # value_and_gradients_function=self.loss_value_and_grad,#self.total_loss,\n # initial_position=self.w_init,#self.trainable_variables,\n # max_iterations=self.params.maxiter)\n #self.minimizer = tf.contrib.opt.ScipyOptimizerInterface(self.total_loss,\n # options={\"maxiter\":self.params.maxiter}) # Default method is L-BFGSB\n for schedule_idx, sch in enumerate(self.params.schedule):\n sch_grads_and_vars = list() # [weight_idx]\n sch_apply_grads = list() # [weight_idx]\n sch_lrs = list() # [weight_idx]\n #Construct weight ops\n weight_ops = [self.trainable_variables[weight] for weight in sch[\"weights\"]]\n for w_idx, weight in enumerate(sch[\"weights\"]):\n weight_name = weight.split(\"/\")[-1].split(\":\")[0]\n learning_rates = tf.compat.v1.train.exponential_decay(\n learning_rate=sch[\"weight_lr\"][w_idx],\n global_step=self.global_step,\n decay_steps=sch[\"decay_steps\"][w_idx],\n decay_rate=sch[\"decay_rate\"][w_idx],\n staircase=sch[\"staircase\"][w_idx],\n name=\"annealing_schedule_\"+weight_name)\n sch_lrs.append(learning_rates)\n if self.params.optimizer == \"sgd\":\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rates,\n name=\"grad_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adam\":\n optimizer = tf.compat.v1.train.AdamOptimizer(learning_rates, beta1=0.9, beta2=0.99,\n epsilon=1e-07, name=\"adam_optimizer_\"+weight_name)\n elif self.params.optimizer == \"adadelta\":\n optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rates, epsilon=1e-07,\n name=\"adadelta_optimizer_\"+weight_name)\n elif self.params.optimizer == \"lbfgsb\":\n optimizer = None\n else:\n assert False, (\"Optimizer \"+self.params.optimizer+\" is not supported.\")\n weight_op = self.trainable_variables[weight]\n sch_grads_and_vars.append(self.compute_weight_gradients(optimizer, weight_op))\n gstep = self.global_step if w_idx == 0 else None # Only increment once\n if self.params.optimizer == \"lbfgsb\": # BFGS doesn't actually need the update op\n if w_idx == 0:\n sch_apply_grads.append(tf.compat.v1.assign_add(self.global_step, 1))\n else:\n sch_apply_grads.append(None)\n else:\n sch_apply_grads.append(optimizer.apply_gradients(sch_grads_and_vars[w_idx],\n global_step=gstep))\n self.learning_rates.append(sch_lrs)\n self.grads_and_vars.append(sch_grads_and_vars)\n self.apply_grads.append(sch_apply_grads)\n self.optimizers_added = True",
"def build_optimizer(model: nn.Module, args: Namespace) -> Optimizer:\n params = [{'params': model.parameters(), 'lr': args.init_lr, 'weight_decay': 0}]\n\n return Adam(params)",
"def optimizer_for_idx(idx, training_steps):\n config = common.get_optimizer_config(idx)\n config['training_steps'] = training_steps\n return NAdamWCosineDecay(**config)",
"def propose_optimize():\n pass",
"def _create_networks_and_optimizer(self):\n self.policy_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self.target_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self._update_target_net()\n \n self.optimizer = optim.Adam(self.policy_net.parameters(), \n lr=self.lr, eps=1e-7)",
"def add_objective(self): \n \n if \"CSS\" in self.algorithm:\n \n if self.num_hidden == 0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.add_css_approximation(data_term)\n \n if \"CD\" in self.algorithm and self.num_hidden ==0:\n \n data_term = self.compute_energy(self.x, self.batch_size)\n \n normalizer_term = self.compute_energy(self.x_gibbs, \n self.batch_size)\n \n normalizer_term = -T.mean(normalizer_term)\n \n if \"CD\" in self.algorithm and self.num_hidden > 0:\n \n data_term = self.compute_free_energy(self.x)\n \n normalizer_term = self.compute_free_energy(self.rbm_cd_samples)\n \n normalizer_term = -T.mean(normalizer_term)\n \n # cost is negative log likelihood \n self.cost = T.mean(data_term) + normalizer_term",
"def create_optimizer(init_lr, num_train_steps, num_warmup_steps):\n # Implements linear decay of the learning rate.\n learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n initial_learning_rate=init_lr,\n decay_steps=num_train_steps,\n end_learning_rate=0.0)\n if num_warmup_steps:\n learning_rate_fn = WarmUp(initial_learning_rate=init_lr,\n decay_schedule_fn=learning_rate_fn,\n warmup_steps=num_warmup_steps)\n optimizer = AdamWeightDecay(\n learning_rate=learning_rate_fn,\n weight_decay_rate=0.01,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=[LAYER_NORM_NAME, 'bias'])\n return optimizer",
"def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])",
"def adam_optimizer() -> mt.HyperparameterConfig:\n # Create a dict of hyperparameters and their feasible ranges\n hyperparam_dict = {\n # ADAM optimization hyperparameters\n # Ref: Kingma, Diederik P., and Jimmy Ba.\n # \"Adam: A method for stochastic optimization.\" (2014).\n # log10 of alpha1 := 1 - beta1\n 'log_alpha1': -1.5,\n # log10 of alpha2 := 1 - beta2\n 'log_alpha2': -2.1,\n # log10 of epsilon\n 'log_epsilon': -7.,\n # Other hyperparameters\n # Minimum value for the voxel weights in the summed xentropy op\n 'weight_floor': 0.01,\n\n # Exponential learning rate decay hyperparams\n # log10 of the learning rate\n 'log_learning_rate': -3.,\n # log10 of the number of Decay steps\n 'log_decay_steps': 3.4,\n # Exponential decay rate\n 'exponential_decay_rate': 0.75\n }\n\n return mt.HyperparameterConfig(hyperparam_dict)",
"def create_optimizer(optimizer_name, model, config):\n if optimizer_name == 'adadelta':\n return torch.optim.Adadelta(model.parameters(),\n lr=config['adadelta_lr'],\n rho=config['adadelta_rho'],\n weight_decay=config['adadelta_weight_decay'],\n eps=config['adadelta_eps'])\n else:\n raise Exception('Optimizer \\'{}\\' not supported.'.format(optimizer_name))",
"def _build_backward_graph(self):\n\n print('[*] Building optimization problem.')\n with tf.variable_scope('optimization'):\n for t in range(0, self.sequence_length):\n print_progress(float(t+1) / self.sequence_length)\n\n # loss is a binary crossentropy for each timestep\n self.loss += self.bce(self.targets[t], self.outputs[t])\n\n self.opt = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)\n g = self.opt.compute_gradients(self.loss)\n clipped_g = [(tf.clip_by_value(grad, self.min_grad, self.max_grad), var) for grad, var in g]\n self.opt_step = self.opt.apply_gradients(clipped_g)",
"def lfads_params(key, lfads_hps):\n keys = random.split(key, 9)\n\n data_dim = lfads_hps['data_dim']\n ntimesteps = lfads_hps['ntimesteps']\n enc_dim = lfads_hps['enc_dim']\n con_dim = lfads_hps['con_dim']\n ii_dim = lfads_hps['ii_dim']\n gen_dim = lfads_hps['gen_dim']\n factors_dim = lfads_hps['factors_dim']\n batch_size = lfads_hps['batch_size']\n ic_dim = enc_dim # Could make a different HP via a linear layer\n ib_dim = lfads_hps['ib_dim'] # inferred bias is a static input to generator\n z_dim = ic_dim + ib_dim + ntimesteps * ii_dim\n gmm_size = lfads_hps['gmm_size']\n\n ic_enc_params = {'fwd_rnn' : gru_params(keys[0], enc_dim, data_dim),\n 'bwd_rnn' : gru_params(keys[1], enc_dim, data_dim)}\n post_ib_params = affine_params(keys[2], 2*ib_dim, 2*enc_dim) # m, v <- bi \n post_ic_params = affine_params(keys[3], 2*gen_dim, 2*enc_dim) # m, v <- bi\n \n prior_params = gmm_params(keys[4], gmm_size, z_dim)\n con_params = gru_params(keys[5], con_dim, 2*enc_dim + factors_dim + ii_dim)\n con_out_params = affine_params(keys[6], 2*ii_dim, con_dim) #m, v\n gen_params = gru_params(keys[7], gen_dim, ii_dim + ib_dim)\n factors_params = linear_params(keys[8], factors_dim, gen_dim)\n lograte_params = affine_params(keys[9], data_dim, factors_dim)\n\n return {'ic_enc' : ic_enc_params,\n 'post_ib' : post_ib_params,\n 'post_ic' : post_ic_params,\n 'con' : con_params, 'con_out' : con_out_params,\n 'gmm' : prior_params,\n 'gen' : gen_params, 'factors' : factors_params,\n 'f0' : np.zeros((lfads_hps['factors_dim'],)),\n 'ii0' : np.zeros((lfads_hps['ii_dim'],)),\n 'logrates' : lograte_params}"
] | [
"0.6353723",
"0.6047894",
"0.6027479",
"0.59312564",
"0.5912963",
"0.5872055",
"0.58629537",
"0.584317",
"0.58246624",
"0.5762034",
"0.5742786",
"0.5726614",
"0.5690474",
"0.56794494",
"0.5674205",
"0.56507385",
"0.564974",
"0.5638503",
"0.56237",
"0.5580002",
"0.5579754",
"0.55759126",
"0.5575868",
"0.55674785",
"0.5562582",
"0.55397785",
"0.55348766",
"0.5519789",
"0.55137235",
"0.55071867"
] | 0.67412966 | 0 |
Compute orbit positions for the general two body problem from the initial orbital elements with a deterministic mathematical model. Factory function that returns a functional model. | def make_position_model_g2b_math(traj_size = 731):
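    # Editor's note (assumption): this factory relies on names defined elsewhere in
    # the module -- the gravitational constant G_, the element/coordinate converters
    # make_model_cfg_to_elt and make_model_elt_to_cfg, the MeanToTrueAnomaly layer,
    # and the `keras`/`tf` imports.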
num_particles = 2
space_dims = 3
t = keras.Input(shape=(traj_size,), name='t')
q0 = keras.Input(shape=(num_particles, space_dims,), name='q0')
v0 = keras.Input(shape=(num_particles, space_dims,), name='v0')
m = keras.Input(shape=(num_particles,), name='m')
# Wrap these up into one tuple of inputs for the model
inputs = (t, q0, v0, m)
# The gravitational constant; numerical value close to 4 pi^2; see rebound documentation for exact value
G = tf.constant(G_)
# Unpack masses and calculate total mass
m1 = m[:, 0]
m2 = m[:, 1]
m_tot = m1 + m2
    # Standard gravitational parameter mu = G * m_tot; shape (batch_size,)
r2_mu = G * m_tot
# Reshape the gravitational field strength from (batch_size,) to (batch_size, 1,)
r2_mu = keras.layers.Reshape((1,))(r2_mu)
# Extract the relative position and relative velocity in Jacobi coordinates
r2_q0 = q0[:, 1, :] - q0[:, 0, :]
r2_v0 = v0[:, 1, :] - v0[:, 0, :]
# Tuple of inputs for the model converting from configuration to orbital elements
r2_cfg = (r2_q0, r2_v0, r2_mu)
# Model mapping cartesian coordinates to orbital elements
model_c2e = make_model_cfg_to_elt()
# Extract the orbital elements of the initial conditions
a0, e0, inc0, Omega0, omega0, f0, M0, N0 = model_c2e(r2_cfg)
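    # (Element names inferred from usage: a0 semi-major axis, e0 eccentricity,
    # inc0 inclination, Omega0 longitude of the ascending node, omega0 argument of
    # periapsis, f0 true anomaly, M0 mean anomaly, N0 mean motion.)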
# Alias r2_mu for naming consistency
mu0 = r2_mu
# Reshape t to (batch_size, traj_size, 1)
t_vec = keras.layers.Reshape(target_shape=(traj_size, 1), name='t_vec')(t)
# Repeat the constant orbital elements to be vectors of shape (batch_size, traj_size)
a = keras.layers.RepeatVector(n=traj_size, name='a')(a0)
e = keras.layers.RepeatVector(n=traj_size, name='e')(e0)
inc = keras.layers.RepeatVector(n=traj_size, name='inc')(inc0)
Omega = keras.layers.RepeatVector(n=traj_size, name='Omega')(Omega0)
omega = keras.layers.RepeatVector(n=traj_size, name='omega')(omega0)
mu = keras.layers.RepeatVector(n=traj_size, name='mu')(mu0)
# Repeat initial mean anomaly M0 and mean motion N0 to match shape of outputs
M0_vec = keras.layers.RepeatVector(n=traj_size, name='M0_vec')(M0)
N0_vec = keras.layers.RepeatVector(n=traj_size, name='N0_vec')(N0)
# Compute the mean anomaly M(t) as a function of time
N_t = keras.layers.multiply(inputs=[N0_vec, t_vec])
M = keras.layers.add(inputs=[M0_vec, N_t])
    # Compute the true anomaly from the mean anomaly and eccentricity
f = MeanToTrueAnomaly(name='mean_to_true_anomaly')([M, e])
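    # (Assumption: the MeanToTrueAnomaly layer inverts Kepler's equation
    # M = E - e*sin(E) for the eccentric anomaly E and converts E to the true
    # anomaly f.)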
# Wrap orbital elements into one tuple of inputs for layer converting to cartesian coordinates
r2_elt = (a, e, inc, Omega, omega, f, mu,)
# Model mapping orbital elements to cartesian coordinates
model_e2c = make_model_elt_to_cfg()
# Convert from orbital elements to cartesian coordinates
# This is the position and velocity of the Jacobi coordinate r2 = q2 - q1
r2_q, r2_v = model_e2c(r2_elt)
# Reshape coefficients for q1 and q2 from r2
coeff_shape = (1,1,)
coeff_shape_layer = keras.layers.Reshape(target_shape=coeff_shape, name='coeff_shape')
coeff1 = coeff_shape_layer(-m2 / m_tot)
coeff2 = coeff_shape_layer( m1 / m_tot)
# Compute the position and velocity of the individual particles from the Jacobi coordinates
q1 = coeff1 * r2_q
q2 = coeff2 * r2_q
v1 = coeff1 * r2_v
v2 = coeff2 * r2_v
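    # (With coeff1 = -m2/m_tot and coeff2 = m1/m_tot, m1*q1 + m2*q2 = 0, so the
    # two bodies are expressed in the barycentric frame with the center of mass
    # fixed at the origin.)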
# Assemble the position and velocity
particle_traj_shape = (-1, 1, 3)
particle_traj_shape_layer = keras.layers.Reshape(target_shape=particle_traj_shape, name='particle_traj_shape')
q1 = particle_traj_shape_layer(q1)
q2 = particle_traj_shape_layer(q2)
v1 = particle_traj_shape_layer(v1)
v2 = particle_traj_shape_layer(v2)
q = keras.layers.concatenate(inputs=[q1, q2], axis=-2)
v = keras.layers.concatenate(inputs=[v1, v2], axis=-2)
# Wrap up the outputs
outputs = (q, v)
# Wrap this into a model
model = keras.Model(inputs=inputs, outputs=outputs, name='model_g2b_position_math')
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_physics_model_r2bc_math(position_model: keras.Model, traj_size: int):\n # Create input layers\n t = keras.Input(shape=(traj_size,), name='t')\n q0 = keras.Input(shape=(2,), name='q0')\n v0 = keras.Input(shape=(2,), name='v0')\n mu = keras.Input(shape=(1,), name='mu')\n # The combined input layers\n inputs = [t, q0, v0, mu]\n\n # Check sizes\n batch_size = t.shape[0]\n tf.debugging.assert_shapes(shapes={\n t: (batch_size, traj_size),\n q0: (batch_size, 2),\n v0: (batch_size, 2),\n mu: (batch_size, 1),\n }, message='make_physics_model_r2bc_math / inputs')\n \n # Return row 0 of a position or velocity for q0_rec and v0_rec\n initial_row_func = lambda q : q[:, 0, :]\n\n # The polar coordinates of the initial conditions\n # r0, theta0, and omega0 each scalars in each batch\n r0, theta0, omega0 = ConfigToPolar2D(name='polar0')([q0, v0])\n \n # Name the outputs of the initial polar\n # These each have shape (batch_size, 1)\n r0 = Identity(name='r0')(r0)\n theta0 = Identity(name='theta0')(theta0)\n omega0 = Identity(name='omega0')(omega0)\n\n # Check sizes\n tf.debugging.assert_shapes(shapes={\n r0: (batch_size, 1),\n theta0: (batch_size, 1),\n omega0: (batch_size, 1),\n }, message='make_physics_model_r2bc_math / polar elements r0, theta0, omega0')\n \n # Compute the motion from the specified position layer\n q, v, a = Motion_R2BC(position_model=position_model, name='motion')([t, r0, theta0, omega0])\n \n # Name the outputs of the circular motion\n # These each have shape (batch_size, traj_size, 2)\n q = Identity(name='q')(q)\n v = Identity(name='v')(v)\n a = Identity(name='a')(a)\n\n # Check sizes\n tf.debugging.assert_shapes(shapes={\n q: (batch_size, traj_size, 2),\n v: (batch_size, traj_size, 2),\n a: (batch_size, traj_size, 2),\n }, message='make_physics_model_r2bc_math / outputs q, v, a')\n \n # Compute q0_rec and v0_rec\n # These each have shape (batch_size, 2)\n q0_rec = keras.layers.Lambda(initial_row_func, name='q0_rec')(q)\n v0_rec = keras.layers.Lambda(initial_row_func, name='v0_rec')(v)\n\n # Check sizes\n tf.debugging.assert_shapes(shapes={\n q0_rec: (batch_size, 2),\n v0_rec: (batch_size, 2),\n }, message='make_physics_model_r2bc_math / outputs q0_rec, v0_rec')\n\n # Compute kinetic energy T and potential energy U\n T = KineticEnergy_R2BC(name='T')(v)\n U = PotentialEnergy_R2BC(name='U')([q, mu])\n\n # Compute the total energy H\n H = keras.layers.add(inputs=[T,U], name='H')\n\n # Compute angular momentum L\n # This has shape (batch_size, traj_size)\n L = AngularMomentum_R2BC(name='L')([q, v])\n \n # Check sizes\n tf.debugging.assert_shapes(shapes={\n T: (batch_size, traj_size),\n U: (batch_size, traj_size),\n H: (batch_size, traj_size),\n L: (batch_size, traj_size),\n }, message='make_physics_model_r2bc_math / outputs H, L')\n\n # Wrap this up into a model\n outputs = [q, v, a, q0_rec, v0_rec, H, L]\n model = keras.Model(inputs=inputs, outputs=outputs, name='model_math')\n return model",
"def solar_model():\n \n latitude, longitude, timezone, elevation = location_input()\n year, time = time_input()\n\n lat_r = latitude/180*np.pi\n lon_r = longitude/180*np.pi \n n = 0\n for i in range(1900,year):\n if i%4 == 0:\n n += 366\n else:\n n+=365\n JulD = n + time + 2415018.5 - (timezone)/24\n LT = time - int(time)\n JC = (JulD - 2451545) / 36525\n x = 46.815 + JC * (0.00059 - JC * 0.001813)\n M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n GMAS_r = m.radians(GMAS)\n GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n GMLS_r = m.radians(GMLS)\n Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n Obliq_C_r = m.radians(Obliq_C)\n SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n STL = GMLS + SEC\n SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n SAL_r = m.radians(SAL)\n sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n Delta_r = np.arcsin(sin_Delta) #in radians \n Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n EOT = 4 * EOT_prime / np.pi * 180 \n TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n if TST / 4 < 0:\n Omega = TST/4+180\n else:\n Omega = TST/4 - 180 \n Omega_r = m.radians(Omega)\n \n cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n Zenith_r = np.arccos(cos_Zenith) #in radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle",
"def global_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D_global(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU_global(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG",
"def make_position_model_r2bc_math(traj_size = 731):\n # Create input layers\n t = keras.Input(shape=(traj_size), name='t')\n r0 = keras.Input(shape=(1,), name='r0')\n theta0 = keras.Input(shape=(1,), name='theta0')\n omega0 = keras.Input(shape=(1,), name='omega0')\n # The combined input layers\n inputs = [t, r0, theta0, omega0]\n \n # Reshape t to (batch_size, traj_size, 1)\n t_vec = keras.layers.Reshape(target_shape=(traj_size, 1), name='t_vec')(t)\n \n # Repeat r, theta0 and omega to be vectors of shape (batch_size, traj_size)\n r = keras.layers.RepeatVector(n=traj_size, name='r')(r0)\n theta0 = keras.layers.RepeatVector(n=traj_size, name='theta0_vec')(theta0)\n omega = keras.layers.RepeatVector(n=traj_size, name='omega_vec')(omega0)\n\n # Check shapes\n batch_size = t.shape[0]\n tf.debugging.assert_shapes(shapes={\n t_vec: (batch_size, traj_size, 1),\n r: (batch_size, traj_size, 1),\n theta0: (batch_size, traj_size, 1),\n omega: (batch_size, traj_size, 1)\n }, message='make_position_model_r2bc_math / inputs')\n \n # The angle theta at time t\n # theta = omega * t + theta0\n omega_t = keras.layers.multiply(inputs=[omega, t_vec], name='omega_t')\n theta = keras.layers.add(inputs=[omega_t, theta0], name='theta')\n\n # Cosine and sine of theta\n cos_theta = keras.layers.Activation(activation=tf.cos, name='cos_theta')(theta)\n sin_theta = keras.layers.Activation(activation=tf.sin, name='sin_theta')(theta)\n\n # Compute qx and qy from r, theta\n qx = keras.layers.multiply(inputs=[r, cos_theta], name='qx')\n qy = keras.layers.multiply(inputs=[r, sin_theta], name='qy')\n \n # Check shapes\n tf.debugging.assert_shapes(shapes={\n omega_t: (batch_size, traj_size, 1),\n theta: (batch_size, traj_size, 1),\n cos_theta: (batch_size, traj_size, 1),\n sin_theta: (batch_size, traj_size, 1),\n qx: (batch_size, traj_size, 1),\n qy: (batch_size, traj_size, 1),\n }, message='make_position_model_r2bc_math / outputs')\n \n # Wrap this into a model\n outputs = [qx, qy]\n model = keras.Model(inputs=inputs, outputs=outputs, name='model_r2bc_math')\n return model",
"def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])",
"def nbody_solve(t,y, G,masses):\r\n N_bodies = int(len(y) / 6)\r\n solved_vector = np.zeros(y.size)\r\n for i in range(N_bodies):\r\n ioffset = i * 6 \r\n for j in range(N_bodies):\r\n joffset = j*6\r\n solved_vector[ioffset] = y[ioffset+3]\r\n solved_vector[ioffset+1] = y[ioffset+4]\r\n solved_vector[ioffset+2] = y[ioffset+5]\r\n if i != j:\r\n dx = y[ioffset] - y[joffset]\r\n dy = y[ioffset+1] - y[joffset+1]\r\n dz = y[ioffset+2] - y[joffset+2] \r\n r = (dx**2+dy**2+dz**2)**0.5\r\n ax = (-G*masses[j] / r**3) * dx\r\n ay = (-G*masses[j] / r**3) * dy\r\n az = (-G*masses[j] / r**3) * dz\r\n #ax = ax.value\r\n #ay = ay.value\r\n #az = az.value\r\n solved_vector[ioffset+3] += ax\r\n solved_vector[ioffset+4] += ay\r\n solved_vector[ioffset+5] += az \r\n return solved_vector",
"def get_sol(self):",
"def crank_nicolson_fd(main_args,\n boundary_left_args, boundary_right_args,\n initial_func,\n min_x, max_x,\n max_t,\n step_x, step_t,\n boundary_approximation_func='first_order_two_points',\n theta=0.5):\n\n d = {\n 'first_order_two_points': _o1p2, # o - order, p - points\n 'second_order_two_points': _o2p2,\n 'second_order_three_points': _o2p3\n }\n (complete_matrix,\n complete_vector) = d[boundary_approximation_func](main_args,\n boundary_left_args, boundary_right_args,\n step_x, step_t,\n min_x, max_x)\n\n m = int(max_t / step_t) + 1\n n = int((max_x - min_x) / step_x) + 1\n u = [None for _ in range(m)]\n u[0] = [initial_func(min_x + x * step_x) for x in range(n)]\n\n a, b, c, f = main_args\n\n A = a * (1 - theta) / step_x ** 2 - b * (1 - theta) / (2 * step_x)\n B = c * (1 - theta) - 2 * a * (1 - theta) / step_x ** 2 - 1 / step_t\n C = a * (1 - theta) / step_x ** 2 + b * (1 - theta) / (2 * step_x)\n\n X = b * theta / (2 * step_x) - a * theta / step_x ** 2\n Y = 2 * a * theta / step_x ** 2 - c * theta - 1 / step_t\n Z = - a * theta / step_x ** 2 - b * theta / (2 * step_x)\n\n matrix_u_t = Matrix(size=(n, 3))\n for i in range(1, n - 1):\n matrix_u_t[i] = [A, B, C]\n complete_matrix(matrix_u_t)\n\n for t in range(1, m):\n v = Vector(size=(n, 1))\n for x in range(1, n - 1):\n v[x] = (u[t - 1][x - 1] * X +\n u[t - 1][x] * Y +\n u[t - 1][x + 1] * Z +\n (theta - 1) * f(min_x + x * step_x, t * step_t) -\n theta * f(min_x + x * step_x, (t - 1) * step_t))\n complete_vector(v, t * step_t, matrix_u_t, u[t-1][0], u[t-1][-1])\n u[t] = list(TDMA(mtrx=matrix_u_t, vec=v).solve())\n\n return u",
"def orbit_posvel(Ms,eccs,semimajors,mreds,obspos=None):\n\n Es = Efn(Ms,eccs) #eccentric anomalies by interpolation\n\n rs = semimajors*(1-eccs*np.cos(Es))\n nus = 2 * np.arctan2(np.sqrt(1+eccs)*np.sin(Es/2),np.sqrt(1-eccs)*np.cos(Es/2))\n\n xs = semimajors*(np.cos(Es) - eccs) #AU\n ys = semimajors*np.sqrt(1-eccs**2)*np.sin(Es) #AU\n \n Edots = np.sqrt(G*mreds*MSUN/(semimajors*AU)**3)/(1-eccs*np.cos(Es))\n \n xdots = -semimajors*AU*np.sin(Es)*Edots/1e5 #km/s\n ydots = semimajors*AU*np.sqrt(1-eccs**2)*np.cos(Es)*Edots/1e5 # km/s\n \n n = np.size(xs)\n\n orbpos = SkyCoord(xs,ys,0*u.AU,representation='cartesian',unit='AU')\n orbvel = SkyCoord(xdots,ydots,0*u.km/u.s,representation='cartesian',unit='km/s')\n if obspos is None:\n obspos = random_spherepos(n) #observer position\n if type(obspos) == type((1,2,3)):\n obspos = SkyCoord(obspos[0],obspos[1],obspos[2],\n representation='cartesian').represent_as('physicsspherical')\n\n if not hasattr(obspos,'theta'): #if obspos not physics spherical, make it \n obspos = obspos.represent_as('physicsspherical')\n \n #random orientation of the sky 'x-y' coordinates\n psi = rand.random(n)*2*np.pi \n\n #transform positions and velocities into observer coordinates\n x,y,z = orbitproject(orbpos.x,orbpos.y,obspos.theta,obspos.phi,psi)\n vx,vy,vz = orbitproject(orbvel.x,orbvel.y,obspos.theta,obspos.phi,psi)\n\n return (SkyCoord(x,y,z,representation='cartesian'),\n SkyCoord(vx,vy,vz,representation='cartesian'))",
"def make_model(self, incl, psi, PA=0.0, get_2d=True, int_kwargs={}, vel_kwargs={}, lw_kwargs=None):\n if PA: x_plane, y_plane = Rosenfeld2d._rotate_sky_plane(self.grid.XYZ[0], self.grid.XYZ[1], -PA)\n else: x_plane, y_plane = self.grid.XYZ[:2]\n\n cos_incl = np.cos(incl)\n sin_incl = np.sin(incl)\n y_plane_cos_incl = y_plane/cos_incl\n\n #**********************\n #ROSENFELD COEFFICIENTS\n fac = -2*np.sin(psi)**2\n A = np.cos(2*incl) + np.cos(2*psi)\n B = fac * 2*(sin_incl/cos_incl) * y_plane\n C = fac * (x_plane**2 + (y_plane_cos_incl)**2)\n t = self._get_t(A,B,C).T\n\n #****************************\n #ROSENFELD CONVERSION X<-->X'\n x_true_near = x_plane\n y_true_near = y_plane_cos_incl + t[1]*sin_incl\n \n x_true_far = x_plane\n y_true_far = y_plane_cos_incl + t[0]*sin_incl\n \n #np.hypot 2x faster than np.linalg.norm([x,y], axis=0)\n R_true_near = np.hypot(x_true_near, y_true_near) \n R_true_far = np.hypot(x_true_far, y_true_far)\n\n z_true_near = t[1] * cos_incl\n z_true_far = t[0] * cos_incl \n\n phi_true_near = np.arctan2(y_true_near, x_true_near) \n phi_true_far = np.arctan2(y_true_far, x_true_far) \n\n #****************************\n \n grid_true = {'near': [x_true_near, y_true_near, z_true_near, R_true_near, phi_true_near], \n 'far': [x_true_far, y_true_far, z_true_far, R_true_far, phi_true_far]}\n\n #*******************************\n #COMPUTE PROPERTIES ON TRUE GRID\n avai_kwargs = [vel_kwargs, int_kwargs, lw_kwargs]\n avai_funcs = [self.velocity_func, self.intensity_func, self.linewidth_func]\n true_kwargs = [isinstance(kwarg, dict) for kwarg in avai_kwargs]\n prop_kwargs = [kwarg for i, kwarg in enumerate(avai_kwargs) if true_kwargs[i]]\n prop_funcs = [func for i, func in enumerate(avai_funcs) if true_kwargs[i]]\n props = self._compute_prop(grid_true, prop_funcs, prop_kwargs)\n #Positive vel is positive along z, i.e. pointing to the observer, for that reason imposed a (-) factor to convert to the standard convention: (+) receding \n if true_kwargs[0]:\n ang_fac_near = -sin_incl * np.cos(phi_true_near)\n ang_fac_far = -sin_incl * np.cos(phi_true_far)\n props[0]['near'] *= ang_fac_near \n props[0]['far'] *= ang_fac_far\n \n #*************************************\n\n return [{side: prop[side].reshape(self.grid.Nodes[:2]) for side in ['near', 'far']} for prop in props]",
"def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n x_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n for k in range(self.no_planets-1):\n r1=np.linalg.norm(r)\n r2=np.linalg.norm(r-self.positionFunction(t0)[:, k])\n r3=np.linalg.norm(r-self.positionFunction(t0)[:, k+1])\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n sys.exit(1)\n print \"My pos x:\", my_pos_x\n print \"My pos y:\", my_pos_y\n #return x1, y1, r1, x2, y2, r2, x3, y3, r3",
"def solve_model(init_amounts, times, neighbourhood, params):\n # init_amounts should be an array of length 3*no_cultures.\n growth_func = make_cns_model(params, neighbourhood)\n sol = odeint(growth_func, init_amounts, times)\n return np.maximum(0, sol)",
"def local_forces(elements, mats, nodes, neq, DME_mat , UC):\r\n IELCON = np.zeros([2], dtype=np.integer)\r\n nels = elements.shape[0]\r\n nnodes = 2\r\n#\r\n for el in range(nels):\r\n iet = np.int(elements[el , 1])\r\n if iet == 0:\r\n ndof = 6\r\n FG = np.zeros((nels, 6))\r\n ul = np.zeros(6)\r\n fl = np.zeros(6)\r\n elif iet == 1:\r\n ndof = 4\r\n FG = np.zeros((nels, 4))\r\n ul = np.zeros(4)\r\n fl = np.zeros(4) \r\n#\r\n for el in range(nels):\r\n#\r\n iet = np.int(elements[el , 1]) \r\n#\r\n elcoor = np.zeros([nnodes, 2])\r\n im = np.int(elements[el , 2])\r\n par0 = mats[im , 0] # Iz\r\n par1 = mats[im , 1] # Emod\r\n par2 = mats[im , 2] # A\r\n for j in range(nnodes):\r\n IELCON[j] = elements[el , j+3]\r\n elcoor[j, 0] = nodes[IELCON[j] , 1]\r\n elcoor[j, 1] = nodes[IELCON[j] , 2] \r\n for j in range(ndof):\r\n ig = DME_mat[el, j]\r\n ul[j] = UC[ig] \r\n if iet == 0: \r\n fl = reac_beam2D(elcoor , par0, par1 , par2 , ul)\r\n elif iet == 1: \r\n fl = reac_beam2DU(elcoor , par0, par1 , ul)\r\n FG[el , :] = fl[:]\r\n \r\n return FG",
"def two_body(lat, h, psi_r, psi_i):\n\n #minus sign is to eliminate the hopping constant\n h_backwards = -np.tril(h)\n h_backwards[0, -1] = h[0, -1]\n h_backwards[-1, 0] = 0.\n #calculates the first expectation value in the commutator, contracting from right to left\n psi_new = one_elec(lat, h_backwards, psi_r, False) + 1.j*one_elec(lat, h_backwards, psi_i, False)\n psi_new = two_elec(lat, psi_new.real, psi_new.imag)\n expectation1 = np.dot((psi_r+1j*psi_i).conj(), psi_new)\n\n h_forwards = -np.triu(h)\n h_forwards[-1, 0] = h[-1, 0]\n h_forwards[0, -1] = 0.\n #calculates the second expectation value in the commutator, contracting from right to left\n psi_new = two_elec(lat, psi_r, psi_i)\n psi_new = one_elec(lat, h_backwards, psi_new.real, False) + 1.j*one_elec(lat, h_backwards, psi_new.imag, False)\n expectation2 = np.dot((psi_r+1j*psi_i).conj(), psi_new)\n\n return expectation1 - expectation2",
"def test1():\n P = 5000.0\n k1, k2, k3 = 1000, 2000, 3000\n # Model\n m1 = SpringModel(\"2D Model\")\n # Nodes\n n1 = Node((0,0))\n n2 = Node((0,0))\n n3 = Node((0,0))\n n4 = Node((0,0))\n # Elements\n e1 = Spring((n1,n3),k1)\n e2 = Spring((n3,n4),k2)\n e3 = Spring((n4,n2),k3)\n\n # Add elements \n for nd in (n1,n2,n3,n4):\n m1.add_node(nd)\n for el in (e1,e2,e3):\n m1.add_element(el)\n\n m1.add_force(n4,(P,))\n m1.add_constraint(n1,ux=0)\n m1.add_constraint(n2,ux=0)\n m1.solve()\n \n # a) Global matrix\n print(\"a) Global matrix:\\n {0}\".format(m1.KG))\n # b) Nodal displacement -> 3 and 4\n print(\"\\nb) Nodal displacement (3 and 4)\")\n print(\"UX3: {0}\".format(n3.ux))\n print(\"UX4: {0}\".format(n4.ux))\n # c) Reaction forces (1 and 2)\n print(\"\\nc) Nodal forces (1 and 2)\")\n print(\"FX1: {0}\".format(n1.fx))\n print(\"FX2: {0}\".format(n2.fx))\n # d) Forces in each spring\n print(\"\\nd) Element forces\")\n print(\"FE1:\\n {0}\".format(e1.fx))\n print(\"FE2:\\n {0}\".format(e2.fx))\n print(\"FE3:\\n {0}\".format(e3.fx))",
"def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))",
"def build_model(skel_dict, project_dir) -> ConcreteModel:\n links = skel_dict[\"links\"]\n positions = skel_dict[\"positions\"]\n dofs = skel_dict[\"dofs\"]\n dofs[\"r_hip\"] = [1,1,1]\n dofs[\"l_hip\"] = [1,1,1]\n dofs[\"tail_base\"] = [1,1,1]\n markers = skel_dict[\"markers\"]\n rot_dict = {}\n pose_dict = {}\n L = len(positions)\n\n phi = [sp.symbols(f\"\\\\phi_{{{l}}}\") for l in range(L)]\n theta = [sp.symbols(f\"\\\\theta_{{{l}}}\") for l in range(L)]\n psi = [sp.symbols(f\"\\\\psi_{{{l}}}\") for l in range(L)]\n\n i=0\n for part in dofs:\n rot_dict[part] = sp.eye(3)\n if dofs[part][1]:\n rot_dict[part] = rot_y(theta[i]) @ rot_dict[part]\n if dofs[part][0]:\n rot_dict[part] = rot_x(phi[i]) @ rot_dict[part]\n if dofs[part][2]:\n rot_dict[part] = rot_z(psi[i]) @ rot_dict[part]\n \n rot_dict[part + \"_i\"] = rot_dict[part].T\n i+=1\n \n x, y, z = sp.symbols(\"x y z\")\n dx, dy, dz = sp.symbols(\"\\\\dot{x} \\\\dot{y} \\\\dot{z}\")\n ddx, ddy, ddz = sp.symbols(\"\\\\ddot{x} \\\\ddot{y} \\\\ddot{z}\")\n\n for link in links:\n if len(link) == 1:\n pose_dict[link[0]] = sp.Matrix([x, y, z])\n else:\n if link[0] not in pose_dict:\n pose_dict[link[0]] = sp.Matrix([x, y, z])\n\n translation_vec = sp.Matrix([positions[link[1]][0] - positions[link[0]][0],\n positions[link[1]][1] - positions[link[0]][1],\n positions[link[1]][2] - positions[link[0]][2]])\n rot_dict[link[1]] = rot_dict[link[1]] @ rot_dict[link[0]]\n rot_dict[link[1]+\"_i\"] = rot_dict[link[1]+\"_i\"].T\n pose_dict[link[1]] = pose_dict[link[0]] + rot_dict[link[0] + \"_i\"] @ translation_vec\n \n t_poses = []\n for pose in pose_dict:\n t_poses.append(pose_dict[pose].T)\n \n t_poses_mat = sp.Matrix(t_poses)\n\n func_map = {\"sin\":sin, \"cos\":cos, \"ImmutableDenseMatrix\":np.array} \n sym_list = [x, y, z, *phi, *theta, *psi]\n pose_to_3d = sp.lambdify(sym_list, t_poses_mat, modules=[func_map])\n pos_funcs = []\n\n for i in range(t_poses_mat.shape[0]):\n lamb = sp.lambdify(sym_list, t_poses_mat[i,:], modules=[func_map])\n pos_funcs.append(lamb)\n \n scene_path = os.path.join(project_dir, \"scene_sba.json\")\n\n K_arr, D_arr, R_arr, t_arr, _ = utils.load_scene(scene_path)\n D_arr = D_arr.reshape((-1,4))\n\n markers_dict = dict(enumerate(markers))\n\n print(f\"\\n\\n\\nLoading data\")\n\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n\n def get_meas_from_df(n, c, l, d):\n n_mask = points_2d_df[\"frame\"]== n-1\n l_mask = points_2d_df[\"marker\"]== markers[l-1]\n c_mask = points_2d_df[\"camera\"]== c-1\n d_idx = {1:\"x\", 2:\"y\"}\n val = points_2d_df[n_mask & l_mask & c_mask]\n return val[d_idx[d]].values[0]\n\n def get_likelihood_from_df(n, c, l):\n n_mask = points_2d_df[\"frame\"]== n-1\n l_mask = points_2d_df[\"marker\"]== markers[l-1]\n c_mask = points_2d_df[\"camera\"]== c-1\n val = points_2d_df[n_mask & l_mask & c_mask]\n return val[\"likelihood\"].values[0]\n \n h = 1/120 #timestep\n start_frame = 80 # 50\n N = 110\n P = 3 + len(phi)+len(theta)+len(psi)\n L = len(pos_funcs)\n C = len(K_arr)\n D2 = 2\n D3 = 3\n\n proj_funcs = [pt3d_to_x2d, pt3d_to_y2d]\n\n R = 5 # measurement standard deviation\n Q = np.array([ # model parameters variance\n 4.0,\n 7.0,\n 5.0,\n 13.0,\n 32.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 9.0,\n 18.0,\n 43.0,\n 53.0,\n 90.0,\n 118.0,\n 247.0,\n 186.0,\n 194.0,\n 164.0,\n 295.0,\n 243.0,\n 334.0,\n 149.0,\n 26.0,\n 12.0,\n 0.0,\n 34.0,\n 43.0,\n 51.0,\n 0.0,\n 0.0,\n 0.0,\n 0.0,\n 
0.0,\n 0.0,\n 0.0,\n 0.0\n ])**2\n\n triangulate_func = calib.triangulate_points_fisheye\n points_2d_filtered_df = points_2d_df[points_2d_df['likelihood']>0.5]\n points_3d_df = calib.get_pairwise_3d_points_from_df(points_2d_filtered_df, K_arr, D_arr, R_arr, t_arr, triangulate_func)\n\n # estimate initial points\n nose_pts = points_3d_df[points_3d_df[\"marker\"]==\"nose\"][[\"x\", \"y\", \"z\", \"frame\"]].values\n x_slope, x_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,0])\n y_slope, y_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,1])\n z_slope, z_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,2])\n frame_est = np.arange(N)\n x_est = frame_est*x_slope + x_intercept\n y_est = frame_est*y_slope + y_intercept\n z_est = frame_est*z_slope + z_intercept\n print(x_est.shape)\n psi_est = np.arctan2(y_slope, x_slope)\n \n print(\"Started Optimisation\")\n m = ConcreteModel(name = \"Skeleton\")\n\n # ===== SETS =====\n m.N = RangeSet(N) #number of timesteps in trajectory\n m.P = RangeSet(P) #number of pose parameters (x, y, z, phi_1..n, theta_1..n, psi_1..n)\n m.L = RangeSet(L) #number of labels\n m.C = RangeSet(C) #number of cameras\n m.D2 = RangeSet(D2) #dimensionality of measurements\n m.D3 = RangeSet(D3) #dimensionality of measurements\n\n def init_meas_weights(model, n, c, l):\n likelihood = get_likelihood_from_df(n+start_frame, c, l)\n if likelihood > 0.5:\n return 1/R\n else:\n return 0\n m.meas_err_weight = Param(m.N, m.C, m.L, initialize=init_meas_weights, mutable=True) # IndexError: index 0 is out of bounds for axis 0 with size 0\n\n def init_model_weights(m, p):\n #if Q[p-1] != 0.0:\n #return 1/Q[p-1]\n #else:\n return 0.01\n m.model_err_weight = Param(m.P, initialize=init_model_weights)\n\n m.h = h\n\n def init_measurements_df(m, n, c, l, d2):\n return get_meas_from_df(n+start_frame, c, l, d2)\n m.meas = Param(m.N, m.C, m.L, m.D2, initialize=init_measurements_df)\n\n resultsfilename='C://Users//user-pc//Desktop//pwpoints.pickle'\n with open(resultsfilename, 'rb') as f:\n data=pickle.load(f)\n \n index_dict = {\"nose\":23, \"neck_base\":24, \"spine\":6, \"tail_base\":22, \"tail1\":11,\n \"tail2\":12, \"l_shoulder\":13,\"l_front_knee\":14,\"l_front_ankle\":15,\"r_shoulder\":2,\n \"r_front_knee\":3, \"r_front_ankle\":4,\"l_hip\":17,\"l_back_knee\":18, \"l_back_ankle\":19,\n \"r_hip\":7,\"r_back_knee\":8,\"r_back_ankle\":9}\n\n pair_dict = {\"nose\":[0,1,24], \"neck_base\":[6,13,2], \"spine\":[24,22], \"tail_base\":[6,7,17], \"tail1\":[22,12],\n \"tail2\":[11,22], \"l_shoulder\":[24,14],\"l_front_knee\":[13,15],\"l_front_ankle\":[14,16],\"r_shoulder\":[24,3],\n \"r_front_knee\":[2,4], \"r_front_ankle\":[3,5],\"l_hip\":[18,22],\"l_back_knee\":[17,19], \"l_back_ankle\":[18,20],\n \"r_hip\":[8,22],\"r_back_knee\":[7,9],\"r_back_ankle\":[8,10]}\n \n \n def init_pw_measurements(m, n, c, l, d2):\n val=0\n if n-1 >= 20 and n-1 < 30:\n fn = 10*(c-1)+(n-20)-1\n x=data[fn]['pose'][0::3]\n y=data[fn]['pose'][1::3]\n xpw=data[fn]['pws'][:,:,:,0]\n ypw=data[fn]['pws'][:,:,:,1]\n marker = markers[l-1]\n base = pair_dict[marker][1]\n if d2==1:\n val=x[base]+xpw[0,base,index_dict[marker]]\n elif d2==2:\n val=y[base]+ypw[0,base,index_dict[marker]]\n #sum/=len(pair_dict[marker])\n return val\n else:\n return get_meas_from_df(n+start_frame, c, l, d2)\n \n m.pw_meas = Param(m.N, m.C, m.L, m.D2, initialize=init_pw_measurements, within=Any)\n \"\"\"\n def init_pw_measurements2(m, n, c, l, d2):\n val=0\n if n-1 >= 20 and n-1 < 30:\n fn = 10*(c-1)+(n-20)-1\n 
x=data[fn]['pose'][0::3]\n y=data[fn]['pose'][1::3]\n xpw=data[fn]['pws'][:,:,:,0]\n ypw=data[fn]['pws'][:,:,:,1]\n marker = markers[l-1]\n if \"ankle\" in marker:\n base = pair_dict[marker][1]\n if d2==1:\n val=x[base]+xpw[0,base,index_dict[marker]]\n elif d2==2:\n val=y[base]+ypw[0,base,index_dict[marker]]\n #sum/=len(pair_dict[marker])\n return val\n else:\n return(0.0)\n \n m.pw_meas2 = Param(m.N, m.C, m.L, m.D2, initialize=init_pw_measurements2, within=Any)\n \"\"\"\n # ===== VARIABLES =====\n m.x = Var(m.N, m.P) #position\n m.dx = Var(m.N, m.P) #velocity\n m.ddx = Var(m.N, m.P) #acceleration\n m.poses = Var(m.N, m.L, m.D3)\n m.slack_model = Var(m.N, m.P)\n m.slack_meas = Var(m.N, m.C, m.L, m.D2, initialize=0.0)\n\n\n # ===== VARIABLES INITIALIZATION =====\n init_x = np.zeros((N-start_frame, P))\n init_x[:,0] = x_est[start_frame: start_frame+N] #x\n init_x[:,1] = y_est[start_frame: start_frame+N] #y\n init_x[:,2] = z_est[start_frame: start_frame+N] #z\n #init_x[:,(3+len(pos_funcs)*2)] = psi_est #yaw - psi\n init_dx = np.zeros((N, P))\n init_ddx = np.zeros((N, P))\n for n in range(1,N+1):\n for p in range(1,P+1):\n if n<len(init_x): #init using known values\n m.x[n,p].value = init_x[n-1,p-1]\n m.dx[n,p].value = init_dx[n-1,p-1]\n m.ddx[n,p].value = init_ddx[n-1,p-1]\n else: #init using last known value\n m.x[n,p].value = init_x[-1,p-1]\n m.dx[n,p].value = init_dx[-1,p-1]\n m.ddx[n,p].value = init_ddx[-1,p-1]\n #init pose\n var_list = [m.x[n,p].value for p in range(1, P+1)]\n for l in range(1,L+1):\n [pos] = pos_funcs[l-1](*var_list)\n for d3 in range(1,D3+1):\n m.poses[n,l,d3].value = pos[d3-1]\n\n # ===== CONSTRAINTS =====\n # 3D POSE\n def pose_constraint(m,n,l,d3):\n #get 3d points\n var_list = [m.x[n,p] for p in range(1, P+1)]\n [pos] = pos_funcs[l-1](*var_list)\n return pos[d3-1] == m.poses[n,l,d3]\n \n m.pose_constraint = Constraint(m.N, m.L, m.D3, rule=pose_constraint)\n\n def backwards_euler_pos(m,n,p): # position\n if n > 1:\n # return m.x[n,p] == m.x[n-1,p] + m.h*m.dx[n-1,p] + m.h**2 * m.ddx[n-1,p]/2\n return m.x[n,p] == m.x[n-1,p] + m.h*m.dx[n,p]\n\n else:\n return Constraint.Skip\n m.integrate_p = Constraint(m.N, m.P, rule = backwards_euler_pos)\n\n\n def backwards_euler_vel(m,n,p): # velocity\n if n > 1:\n return m.dx[n,p] == m.dx[n-1,p] + m.h*m.ddx[n,p]\n else:\n return Constraint.Skip \n m.integrate_v = Constraint(m.N, m.P, rule = backwards_euler_vel)\n\n m.angs = ConstraintList()\n for n in range(1,N):\n for i in range(3, 3*len(positions)):\n m.angs.add(expr=(abs(m.x[n,i]) <= np.pi/2))\n\n # MODEL\n def constant_acc(m, n, p):\n if n > 1:\n return m.ddx[n,p] == m.ddx[n-1,p] + m.slack_model[n,p]\n else:\n return Constraint.Skip \n m.constant_acc = Constraint(m.N, m.P, rule = constant_acc)\n\n # MEASUREMENT \n def measurement_constraints(m, n, c, l, d2):\n #project\n K, D, R, t = K_arr[c-1], D_arr[c-1], R_arr[c-1], t_arr[c-1]\n x, y, z = m.poses[n,l,1], m.poses[n,l,2], m.poses[n,l,3]\n return proj_funcs[d2-1](x, y, z, K, D, R, t) - m.meas[n, c, l, d2] - m.slack_meas[n, c, l, d2] ==0\n m.measurement = Constraint(m.N, m.C, m.L, m.D2, rule = measurement_constraints)\n \n def pw_measurement_constraints(m, n, c, l, d2):\n #project\n if n-1 >= 20 and n-1 < 30:\n K, D, R, t = K_arr[c-1], D_arr[c-1], R_arr[c-1], t_arr[c-1]\n x, y, z = m.poses[n,l,1], m.poses[n,l,2], m.poses[n,l,3]\n return proj_funcs[d2-1](x, y, z, K, D, R, t) - m.pw_meas[n, c, l, d2] - m.slack_meas[n, c, l, d2] ==0.0\n else:\n return(Constraint.Skip)\n m.pw_measurement = Constraint(m.N, m.C, m.L, m.D2, rule = 
pw_measurement_constraints)\n \"\"\"\n def pw_measurement_constraints2(m, n, c, l, d2):\n #project\n if n-1 >= 20 and n-1 < 30 and \"ankle\" in markers[l-1]:\n K, D, R, t = K_arr[c-1], D_arr[c-1], R_arr[c-1], t_arr[c-1]\n x, y, z = m.poses[n,l,1], m.poses[n,l,2], m.poses[n,l,3]\n return proj_funcs[d2-1](x, y, z, K, D, R, t) - m.pw_meas2[n, c, l, d2] - m.slack_meas[n, c, l, d2] ==0.0\n else:\n return(Constraint.Skip)\n m.pw_measurement2 = Constraint(m.N, m.C, m.L, m.D2, rule = pw_measurement_constraints2)\n \"\"\"\n def obj(m):\n slack_model_err = 0.0\n slack_meas_err = 0.0\n for n in range(1, N+1):\n #Model Error\n for p in range(1, P+1):\n slack_model_err += m.model_err_weight[p] * m.slack_model[n, p] ** 2\n #Measurement Error\n for l in range(1, L+1):\n for c in range (1, C+1):\n for d2 in range(1, D2+1):\n if n-1 >= 20 and n-1 < 30:\n slack_meas_err += redescending_loss(1/30 * m.slack_meas[n, c, l, d2], 3, 5, 15)\n else:\n slack_meas_err += redescending_loss(m.meas_err_weight[n, c, l] * m.slack_meas[n, c, l, d2], 3, 10, 20)\n return slack_meas_err + slack_model_err\n\n m.obj = Objective(rule = obj)\n\n return(m, pose_to_3d)",
"def solve_motion_equations(M, B, state_vars=[], input_vars=[], parameters_values=dict()):\n\n M_shape = M.shape\n B_shape = B.shape\n assert(M_shape[0] == B_shape[0])\n\n # at first we create a buffer for the string that we complete and execute \n # to dynamically define a function and return it\n fnc_str_buffer = '''\ndef f(x, u, uuref, t, pp):\n # System variables\n %s # x_str\n %s # u_str\n \n # Parameters\n %s # par_str\n \n # Sympy Common Expressions\n %s # cse_str\n\n # Vectorfield\n %s # ff_str\n \n return ff\n'''\n\n #################################\n # handle system state variables #\n #################################\n # --> leads to x_str which shows how to unpack the state variables\n x_str = ''\n for var in state_vars:\n x_str += '%s, '%str(var)\n\n # as a last we remove the trailing '; ' to avoid syntax erros\n x_str = x_str + '= x'\n\n ##########################\n # handle input variables #\n ##########################\n # --> leads to u_str which will show how to unpack the inputs of the control system\n u_str = ''\n for var in input_vars:\n u_str += '%s, '%str(var)\n\n # after we remove the trailing '; ' to avoid syntax errors x_str will look like:\n # 'u1, u2, ... , um = u'\n u_str = u_str + '= u'\n\n ############################\n # handle system parameters #\n ############################\n # --> leads to par_str\n par_str = ''\n for k, v in list(parameters_values.items()):\n # 'k' is the name of a system parameter such as mass or gravitational acceleration\n # 'v' is its value in SI units\n par_str += '%s = %s; '%(str(k), str(v))\n\n # as a last we remove the trailing '; ' from par_str to avoid syntax errors\n par_str = par_str[:-2]\n\n # now solve the motion equations w.r.t. the accelerations\n sol = M.solve(B)\n\n # use SymPy's Common Subexpression Elimination\n cse_list, cse_res = sp.cse(sol, symbols=sp.numbered_symbols('q'))\n\n ################################\n # handle common subexpressions #\n ################################\n # --> leads to cse_str\n cse_str = ''\n #cse_list = [(str(l), str(r)) for l, r in cse_list]\n for cse_pair in cse_list:\n cse_str += '%s = %s; '%(str(cse_pair[0]), str(cse_pair[1]))\n\n # add result of cse\n for i in range(M_shape[0]):\n cse_str += 'q%d_dd = %s; '%(i, str(cse_res[0][i]))\n\n cse_str = cse_str[:-2]\n\n ######################\n # create vectorfield #\n ######################\n # --> leads to ff_str\n ff_str = 'ff = ['\n\n for i in range(M_shape[0]):\n ff_str += '%s, '%str(state_vars[2*i+1])\n ff_str += 'q%s_dd, '%(i)\n\n # remove trailing ',' and add closing brackets\n ff_str = ff_str[:-2] + ']'\n\n ############################\n # Create callable function #\n ############################\n # now we can replace all placeholders in the function string buffer\n fnc_str = fnc_str_buffer%(x_str, u_str, par_str, cse_str, ff_str)\n # and finally execute it which will create a python function 'f'\n # pass the current global scope to exec(). this is necessary so that sympy functions like cos/sin can be used\n globals_locals = globals()\n exec(fnc_str, globals_locals)\n\n # now we have defined a callable function that can be used within PyTrajectory\n return globals_locals['f']",
"def solve(self,init=None,g_init=1e-3,g_step=5e-3,g_fin=None,evol=False,movingGrid=False):\n if(g_fin==None): g_fin=self.g\n #Check if all signs are correct\n if(g_fin<0):\n if(g_step>0): g_step*=-1.\n if(g_init>0): g_init*=-1.\n else:\n if(g_step<0): g_step*=-1.\n if(g_init<0): g_step*=-1.\n\n #If no initial distribution is given, start from the BCS ground state\n if(init==None): init=[1 if i<self.N else 0 for i in range(self.n)]\n var_init=np.array([-2.*init[i]-g_init/(1-2.*init[i])*np.sum([self.XXZ.Z(j,i)*(init[j]-init[i]) for j in range(self.n) if j!=i]) for i in range(self.n)])\n n_step=int((g_fin-g_init)/g_step)\n g=g_init\n\n #Define necessary variables if evol or movingGrid=True\n if(evol or movingGrid):\n var_evol=np.zeros([n_step,self.n])\n g_evol=np.zeros(n_step)\n if(movingGrid):\n rap_evol = np.zeros([n_step,self.N],dtype=complex)\n rap_evol[0] = [self.levels[i] for i in range(self.n) if init[i]!=0 ]\n rap=np.array([self.levels[i]+0.5*np.abs(np.random.rand()) for i in range(self.n) if init[i]!=0])\n grid=np.zeros(self.N+1,dtype=complex)\n grid[0]=1e3\n for k in range(self.N): grid[k+1]=rap[k]\n n_grid=n_step/20 #Calculates rapidities at 20 intermediate steps\n\n #Gradually increase the coupling constant g and solve iteratively at each step starting from the Taylor approximation from the previous step\n for i in range(n_step):\n var_new=self.newtonraphson(g,var_init)\n der=self.get_derivative(var_new,g)\n #var_init=self.taylor_expansion(g,g_step,var_new)\n var_init = var_new+g_step*der\n g+=g_step\n #print g\n\n #Save variables at current step if evol =True\n if(evol or movingGrid):\n var_evol[i]=var_init\n g_evol[i]=g\n if(movingGrid and i%n_grid==0 and i!=0):\n #Method for obtaining the rapidities starting from the set of Lambda_i\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n rap_evol[i]=np.sort(lm.laguerre())\n for k in range(self.N): grid[k+1]=rap[k]\n grid[0]=10*max(rap)\n elif(movingGrid and i!=0):\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap_evol[i]=np.sort(lm.laguerre())\n \n \n #One final iterative solution at g=g_fin\n self.solution=self.newtonraphson(g_fin,var_init)\n #Calculate the occupation numbers\n self.occupation=0.5*(-1.-self.solution+g_fin*self.get_derivative(self.solution,g_fin))\n\n #One final calculation of the rapidities\n if(movingGrid):\n rf=RootFinder(self.XXZ,self.solution/g_fin,g_fin,self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n self.rapidities=rap\n\n if movingGrid: return [g_evol,var_evol,rap_evol]\n if evol: return [g_evol,var_evol]\n return self.solution",
"def make_model_g2b_math(traj_size: int = 731):\n # Build the position model\n position_model = make_position_model_g2b_math(traj_size=traj_size)\n \n # Build the model with this position layer and the input trajectory size\n return make_physics_model_g2b(position_model=position_model, traj_size=traj_size)",
"def test_sim(self):\n nxfe = 4\n ipopt = get_solver(\"ipopt\")\n\n m_steady = self.make_steady_model(nfe=nxfe)\n self.fix_model_inlets(m_steady, inlet_pressure=50.0 * pyo.units.bar)\n m_steady.fs.compressor.boost_pressure[:].fix(7.0 * pyo.units.bar)\n ipopt.solve(m_steady, tee=True)\n time_steady = m_steady.fs.time\n scalar_data = self.get_scalar_data_from_model(m_steady, time_steady)\n initial_data = self.get_data_from_model_at_time(m_steady, time_steady)\n\n m = pyo.ConcreteModel()\n default = {\n \"dynamic\": True,\n \"time_set\": [0.0, 20.0],\n \"time_units\": pyo.units.hr,\n }\n m.fs = idaes.FlowsheetBlock(**default)\n m.fs.properties = NaturalGasParameterBlock()\n pipeline_config = {\n \"property_package\": m.fs.properties,\n \"finite_elements\": nxfe,\n }\n m.fs.pipeline = GasPipeline(**pipeline_config)\n pipeline = m.fs.pipeline\n compressor_config = {\"property_package\": m.fs.properties}\n m.fs.compressor = Compressor(**compressor_config)\n compressor = m.fs.compressor\n m._compressor_to_pipeline = Arc(\n ports=(compressor.outlet_port, pipeline.inlet_port),\n )\n expand_arcs = pyo.TransformationFactory(\"network.expand_arcs\")\n expand_arcs.apply_to(m)\n\n cv = m.fs.pipeline.control_volume\n assert_units_consistent(m)\n\n disc = pyo.TransformationFactory(\"dae.finite_difference\")\n ntfe = 20\n disc.apply_to(m, nfe=ntfe, wrt=m.fs.time, scheme=\"BACKWARD\")\n\n time = m.fs.time\n t0 = m.fs.time.first()\n x0 = cv.length_domain.first()\n xf = cv.length_domain.last()\n j = next(iter(m.fs.properties.component_list))\n\n # Fix geometry variables\n m.fs.pipeline.diameter.fix(0.92 * pyo.units.m)\n cv.length.fix(300.0 * pyo.units.km)\n\n # Fix boost pressure\n compressor.boost_pressure[:].fix()\n\n # Inlets to the compressor are fixed, except for flow, where\n # the outlet is fixed.\n state = compressor.inlet_state\n state[:].pressure.fix()\n state[:].mole_frac_comp[j].fix()\n state[:].temperature.fix()\n cv.flow_mass[:, xf].fix()\n\n # Fix initial conditions. 
Here, pressure and volume for all\n # non-specified points.\n for x in cv.length_domain:\n if x != x0:\n cv.pressure[t0, x].fix()\n if x != xf:\n cv.flow_mass[t0, x].fix()\n\n # I want to deactivate differential equations at (t0, xf)\n # Material balance already doesn't exist here.\n cv.momentum_balance[t0, xf].deactivate()\n\n self.assertEqual(degrees_of_freedom(m), 0)\n\n # Load initial steady state into model at all time points.\n for name, val in initial_data.items():\n var = m.find_component(name)\n for t in time:\n var[t].set_value(val)\n # Load scalar data from initial steady state\n # (initialize area, basically)\n for name, val in scalar_data.items():\n var = m.find_component(name)\n var.set_value(val)\n\n cv.material_accumulation[...].set_value(0.0)\n cv.flow_mass_dt[...].set_value(0.0)\n\n for con in large_residuals_set(m):\n resid = pyo.value(con.body - con.upper)\n print(resid, con.name)\n ipopt.solve(m, tee=True)\n\n # Load input sequence into model\n sample_points = [4.0, 20.0]\n input_name = \"fs.pipeline.control_volume.flow_mass[*,1.0]\"\n nominal_density = 0.72\n val = 12.0 * 1e6 / 24 * nominal_density # 12 (1e6 SCM)/day\n input_series_data = (\n sample_points,\n {input_name: [val, val]},\n )\n input_interval_data = interval_data_from_time_series(input_series_data)\n load_inputs_into_model(m, time, input_interval_data)\n # Solve with loaded inputs\n res = ipopt.solve(m, tee=True)\n self.assertIs(\n res.solver.termination_condition,\n pyo.TerminationCondition.optimal,\n )\n\n # These predicted values come from a simulation of a single pipeline\n # model from the Pyomo DAE example. flow_mass has been converted\n # to kg/hr from (1e4 SCM/hr) by a factor of 0.72*1e4, where\n # 0.72 kg/m**3 is the gas density at standard conditions.\n pred_values = (\n list(time),\n {\n \"fs.pipeline.control_volume.flow_mass[*,%s]\"\n % x0: [\n 3.000e5,\n 2.999e5,\n 2.999e5,\n 2.999e5,\n 3.000e5,\n 3.174e5,\n 3.301e5,\n 3.389e5,\n 3.449e5,\n 3.492e5,\n 3.523e5,\n 3.544e5,\n 3.560e5,\n 3.571e5,\n 3.579e5,\n 3.585e5,\n 3.589e5,\n 3.592e5,\n 3.594e5,\n 3.595e5,\n 3.597e5,\n ],\n \"fs.pipeline.control_volume.pressure[*,%s]\"\n % xf: [\n 50.90,\n 50.90,\n 50.90,\n 50.90,\n 50.90,\n 49.83,\n 49.31,\n 48.95,\n 48.69,\n 48.51,\n 48.38,\n 48.29,\n 48.22,\n 48.17,\n 48.14,\n 48.11,\n 48.10,\n 48.08,\n 48.07,\n 48.07,\n 48.06,\n ],\n \"fs.compressor.power[*]\": [\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.590e3,\n 1.682e3,\n 1.750e3,\n 1.796e3,\n 1.828e3,\n 1.851e3,\n 1.867e3,\n 1.878e3,\n 1.887e3,\n 1.892e3,\n 1.897e3,\n 1.900e3,\n 1.902e3,\n 1.904e3,\n 1.905e3,\n 1.906e3,\n 1.906e3,\n ],\n },\n )\n output_names = [\n \"fs.pipeline.control_volume.flow_mass[*,%s]\" % x0,\n \"fs.pipeline.control_volume.pressure[*,%s]\" % xf,\n \"fs.compressor.power[*]\",\n ]\n actual_values = (\n list(time),\n {\n name: [var.value for var in m.find_component(name).values()]\n for name in output_names\n },\n )\n # Note: We fail with a reltol of 0.01, due to flow rate discrepancies\n # in positions 6, 7, 8, and 9. A reltol of 0.02 seems reasonable to me.\n self.assertStructuredAlmostEqual(pred_values, actual_values, reltol=0.02)",
"def getorbit(sat, tfinal, tstep, trec):\n ntimes = (int)(tfinal/tstep)\n n_tvals = (int)(tfinal/trec)\n state_arr = np.zeros((6, n_tvals))\n orbelem_arr = np.zeros((6, n_tvals))\n s_major_arr = np.zeros(n_tvals)\n count = 0\n for i in range(ntimes):\n sat.rk4_step_sat(tstep)\n if i % (trec/tstep) == 0:\n state_arr[:, count] = sat.getstate()\n orbelem_arr[:, count] = sat.orb_elem()\n s_major_arr[count] = sat.get_a()\n tether = sat.get_tether()\n tether.setlamda_a(sat)\n tether.set_iv(sat)\n print state_arr[0, count]\n print count\n count += 1\n return (state_arr, orbelem_arr, s_major_arr)",
"def CoordTrans(frame1, frame2, original_vec, oe=np.zeros(6), \n theta_gst=float('NaN'), lla_gs=np.zeros(3), mu=c.mu_earth, \n r_body=c.r_earth):\n\n # Orbital Elements\n a, e, inc, raan, w, nu = oe\n\n # Warnings\n oe_frames = ['ric', 'ntw', 'pqw']\n if any(frame in oe_frames for frame in (frame1, frame2)):\n if oe.dot(oe) == 0:\n print('ERROR: You forgot to define the orbital elements!')\n\n topocentric_frames = ['sez']\n if any(frame in topocentric_frames for frame in (frame1, frame2)):\n if lla_gs.dot(lla_gs) == 0:\n print('ERROR: You forgot lla for the ground stations!')\n\n # Coordinate System Logic\n if frame1.lower() == 'bci':\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(original_vec, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(original_vec, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(original_vec, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'bcbf':\n if frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(original_vec, r_body=r_body)\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(original_vec, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ric':\n rotated_vec1 = ric2bci(original_vec, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if 
np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ntw':\n rotated_vec1 = ntw2bci(original_vec, e, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'pqw':\n rotated_vec1 = pqw2bci(original_vec, raan, inc, w)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'lla':\n rotated_vec1 = lla2bcbf(original_vec, r_body=r_body)\n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'sez':\n rotated_vec1 = sez2bcbf(original_vec, lla_gs, r_body=r_body)\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n if frame2.lower() == 'bcbf':\n rotated_vec = 
rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec2\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n else:\n print('ERROR: Frame1 is not included in this function!')\n\n return rotated_vec",
"def mercier(self):\n\n # See Overleaf note \"Mercier criterion near the magnetic axis- detailed notes\".\n # See also \"20200604-02 Checking sign in Mercier DGeod near axis.docx\"\n\n # Shorthand:\n d_l_d_phi = self.d_l_d_phi\n B0 = self.B0\n G0 = self.G0\n p2 = self.p2\n etabar = self.etabar\n curvature = self.curvature\n sigma = self.sigma\n iotaN = self.iotaN\n iota = self.iota\n pi = np.pi\n\n #integrand = d_l_d_phi * (Y1c * Y1c + X1c * (X1c + Y1s)) / (Y1c * Y1c + (X1c + Y1s) * (X1c + Y1s))\n integrand = d_l_d_phi * (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*sigma*sigma + etabar*etabar*curvature*curvature) \\\n / (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*(1+sigma*sigma) + 2*etabar*etabar*curvature*curvature)\n\n integral = np.sum(integrand) * self.d_phi * self.nfp * 2 * pi / self.axis_length\n\n #DGeod_times_r2 = -(2 * sG * spsi * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar &\n self.DGeod_times_r2 = -(2 * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar \\\n / (pi * pi * pi * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * iotaN * iotaN)) \\\n * integral\n\n self.d2_volume_d_psi2 = 4*pi*pi*abs(G0)/(B0*B0*B0)*(3*etabar*etabar - 4*self.B20_mean/B0 + 2 * (self.G2 + iota * self.I2)/G0)\n\n self.DWell_times_r2 = (mu0 * p2 * abs(G0) / (8 * pi * pi * pi * pi * B0 * B0 * B0)) * \\\n (self.d2_volume_d_psi2 - 8 * pi * pi * mu0 * p2 * abs(G0) / (B0 * B0 * B0 * B0 * B0))\n\n self.DMerc_times_r2 = self.DWell_times_r2 + self.DGeod_times_r2",
"def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n \n # initial state\n self.state_scale = 1\n \n self.init_pose = np.concatenate((truncnorm.rvs(-1,1,0,1./3.,3), truncnorm.rvs(-0.021,0.021,0,0.007,3)))\n self.init_pose[2] += 10\n self.init_velocities = np.array([0.,0.,0.])\n self.init_angle_velocities = np.array([0.,0.,0.])\n\n self.runtime = runtime\n \n # Simulation\n self.sim = PhysicsSim(self.init_pose, self.init_velocities, self.init_angle_velocities, self.runtime) \n self.action_repeat = 1\n\n self.init_state = np.concatenate((self.init_pose,self.init_velocities,self.init_angle_velocities),axis=0)\n self.state_size = self.action_repeat * self.init_state.shape[0]\n \n self.action_low = 0 #-1\n self.action_high = 2*450 #1\n self.action_size = 4\n\n self.action_scale = 1 #450 # 1/2 max of the action \n #self.state_scale = 150 # 1/2 size of the state space\n \n # Goal\n self.target_pose = np.array([0.,0.,150.0])\n\n # The previous position\n self.prev_pose = self.init_pose",
"def construct_basis_tensors(self):\n\t\n\tu = np.array([self.cth*self.cphi, self.cth*self.sphi, -self.sth])\n\tv = np.array([self.sphi, -self.cphi, 0.0])\n\n\tep = np.outer(u,u) - np.outer(v,v)\n\tec = np.outer(u,v) + np.outer(v,u)\n\t\n\tself.ep = self.c2psi*ep - self.s2psi*ec\n\tself.ec = self.s2psi*ep + self.c2psi*ec\n\t\t\n\treturn",
"def orbit_cross():\n \n # potential\n ham = gp.Hamiltonian(gp.MilkyWayPotential(nucleus=dict(m=0), halo=dict(c=0.95, m=7E11), bulge=dict(m=4E9), disk=dict(m=5.5e10)))\n gc_frame = coord.Galactocentric(galcen_distance=8*u.kpc, z_sun=0*u.pc)\n \n # orbital solution\n pos = np.load('/home/ana/projects/GD1-DR2/data/gd1_orbit.npy')\n phi1, phi2, d, pm1, pm2, vr = pos\n\n c = gc.GD1(phi1=phi1*u.deg, phi2=phi2*u.deg, distance=d*u.kpc, \n pm_phi1_cosphi2=pm1*u.mas/u.yr,\n pm_phi2=pm2*u.mas/u.yr,\n radial_velocity=vr*u.km/u.s)\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)\n \n dt = 0.5 * u.Myr\n n_steps = 250\n fit_orbit = ham.integrate_orbit(w0, dt=dt, n_steps=120)\n\n # find gap 6D location at present\n gap_phi0 = -40*u.deg\n model_gd1 = fit_orbit.to_coord_frame(gc.GD1, galactocentric_frame=gc_frame)\n gap_i = np.abs(model_gd1.phi1.wrap_at(180*u.deg) - gap_phi0).argmin()\n gap_w0 = fit_orbit[gap_i]\n \n # gap orbit\n t1 = 0*u.Myr\n t2 = -1*u.Gyr\n dt = -0.5\n t = np.arange(t1.to(u.Myr).value, t2.to(u.Myr).value+dt, dt)\n gap_orbit = ham.integrate_orbit(gap_w0, dt=dt, t1=t1, t2=t2)\n \n \n # plot relative distances as a function of time\n plt.close()\n plt.figure(figsize=(9,5))\n \n lw = 3\n\n # show classicals\n tcls = Table.read('../data/positions_classical.fits')\n ra, dec, d, pmra, pmdec, vr = tcls['ra'], tcls['dec'], tcls['distance'], tcls['pmra'], tcls['pmdec'], tcls['vr']\n cs = coord.ICRS(ra=ra, dec=dec, distance=d, pm_ra_cosdec=pmra, pm_dec=pmdec, radial_velocity=vr)\n ws = gd.PhaseSpacePosition(cs.transform_to(gc_frame).cartesian)\n satellite_orbit = ham.integrate_orbit(ws, dt=dt, t1=t1, t2=t2)\n for e in range(len(tcls)):\n if e==0:\n label = 'Classical\\ndwarfs'\n else:\n label = ''\n rel_distance = np.linalg.norm(gap_orbit.xyz - satellite_orbit.xyz[:,:,e], axis=0)*gap_orbit.xyz[0].unit\n plt.plot(t, rel_distance, '-', color=mpl.cm.Reds(0.9), alpha=0.5, label=label, lw=lw)\n \n # show ultrafaints\n tufd = Table.read('../data/positions_ufd.fits')\n ra, dec, d, pmra, pmdec, vr = tufd['ra'], tufd['dec'], tufd['distance'], tufd['pmra'], tufd['pmdec'], tufd['vr']\n cs = coord.ICRS(ra=ra, dec=dec, distance=d, pm_ra_cosdec=pmra, pm_dec=pmdec, radial_velocity=vr)\n ws = gd.PhaseSpacePosition(cs.transform_to(gc_frame).cartesian)\n satellite_orbit = ham.integrate_orbit(ws, dt=dt, t1=t1, t2=t2)\n for e in range(len(tufd)):\n if e==0:\n label = 'Ultra-faint\\ndwarfs'\n else:\n label = ''\n rel_distance = np.linalg.norm(gap_orbit.xyz - satellite_orbit.xyz[:,:,e], axis=0)*gap_orbit.xyz[0].unit\n plt.plot(t, rel_distance, '-', color=mpl.cm.Reds(0.7), alpha=0.5, label=label, lw=lw)\n \n # show globulars\n tgc = Table.read('../data/positions_globular.fits')\n ra, dec, d, pmra, pmdec, vr = tgc['ra'], tgc['dec'], tgc['distance'], tgc['pmra'], tgc['pmdec'], tgc['vr']\n cs = coord.ICRS(ra=ra, dec=dec, distance=d, pm_ra_cosdec=pmra, pm_dec=pmdec, radial_velocity=vr)\n ws = gd.PhaseSpacePosition(cs.transform_to(gc_frame).cartesian)\n satellite_orbit = ham.integrate_orbit(ws, dt=dt, t1=t1, t2=t2)\n for e in range(len(tgc)):\n if e==0:\n label = 'Globular\\nclusters'\n else:\n label = ''\n rel_distance = np.linalg.norm(gap_orbit.xyz - satellite_orbit.xyz[:,:,e], axis=0)*gap_orbit.xyz[0].unit\n plt.plot(t, rel_distance, '-', color=mpl.cm.Reds(0.5), alpha=0.5, label=label, lw=lw)\n\n plt.plot(t, np.abs(gap_orbit.xyz[2]), '-', color=mpl.cm.Reds(0.3), alpha=0.5, label='Disk', lw=lw, zorder=0)\n #plt.plot(t, np.sqrt(gap_orbit.xyz[0]**2 + gap_orbit.xyz[1]**2), 'r-', alpha=0.2)\n\n 
plt.ylim(0.1,200)\n plt.gca().set_yscale('log')\n \n plt.legend(loc=2, fontsize='small', markerscale=2)\n plt.xlabel('Time [Myr]')\n plt.ylabel('Relative distance [kpc]')\n \n plt.tight_layout()\n plt.savefig('../plots/satellite_distances.png', dpi=200)\n plt.savefig('../paper/satellite_distances.pdf')",
"def reaction_forces(Ca, la, x1, x2, x3, xa, h, d1, d3, theta, P, q, E, I):\r\n \r\n equation_matrix = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], \r\n [1, 0, 0, 1, 0, 1, 0, np.sin(theta), 0, 0, 0, 0, (P*np.sin(theta)+q*la*np.cos(theta))], \r\n [0, 1, 0, 0, 1, 0, 1, np.cos(theta), 0, 0, 0, 0, (P*np.cos(theta)-q*la*np.sin(theta))],\r\n \r\n [-(Ca/4-h/2), 0, 0, -(Ca/4-h/2) ,0 , -(Ca/4-h/2), 0, (np.cos(theta)*h/2-np.sin(theta)*Ca/4), 0, 0, 0, 0, (P*np.cos(theta)*h/2*-P*np.sin(theta)*Ca/4)], \r\n [0, (x2-x1), 0, 0, 0, 0, -(x3-x2), (np.cos(theta)*xa/2), 0, 0, 0, 0, (-P*np.cos(theta)*xa/2+q*la*np.sin(theta)*(la/2-x2))], \r\n [-(x2-x1), 0, 0, 0, 0, (x3-x2), 0, -np.sin(theta)*xa/2, 0, 0, 0, 0, (P*np.sin(theta)*xa/2+q*la*np.cos(theta)*(la/2-x2))], \r\n \r\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, x1, 1, -q*np.sin(theta)*((x1**4)/24)], \r\n [0, ((x2-x1)**3)/6, 0, 0, 0, 0, 0, ((np.cos(theta))*((xa/2)**3)/6), 0, 0, x2, 1, (-q*np.sin(theta)*((x2**4)/24))], \r\n [0, ((x3-x1)**3)/6, 0, 0, ((x3-x2)**3)/6, 0, 0, ((np.cos(theta))*((x3-x2+xa/2)**3)/6), 0, 0, x3, 1, (-q*np.sin(theta)*((x3**4)/24)+P*(np.cos(theta))*(x3-x2-xa/2)**3/6)], \r\n [0, 0, 0, 0, 0, 0, 0, 0, x1, 1, 0, 0, (-E*I*d1*+q*np.cos(theta)*(x1**4)/24)], \r\n [(((x2-x1)**3)/6), 0, 0, 0, 0, 0, 0, ((-np.sin(theta))*((xa/2)**3)/6), x2, 1, 0, 0, (q*np.cos(theta)*(x2**4)/24)], \r\n [(((x3-x1)**3)/6),0,0,(((x3-x2)**3)/6),0,0,0,((-np.sin(theta))*((x3-x2+xa/2)**3)/6),x3,1,0,0,(-E*I*d3*+q*np.cos(theta)*((x3**4)/24)+P/6*np.sin(theta)*(x3-x2-xa/2)**3)]])\r\n \r\n \r\n unknown_matrix = equation_matrix[:,:-1]\r\n constant_matrix = equation_matrix[:,-1]\r\n \r\n \r\n solution_matrix = np.linalg.solve(unknown_matrix,constant_matrix)\r\n \r\n solution_matrix = solution_matrix/1000\r\n \r\n (R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4) = tuple(solution_matrix)\r\n \r\n print((R1y, R1z, R2x, R2y, R2z, R3y, R3z, RI, c1, c2, c3, c4))",
"def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)",
"def GeoVector(body, time, aberration):\n if body == Body.Moon:\n return GeoMoon(time)\n\n if body == Body.Earth:\n return Vector(0.0, 0.0, 0.0, time)\n\n if not aberration:\n # No aberration, so calculate Earth's position once, at the time of observation.\n earth = _CalcEarth(time)\n\n # Correct for light-travel time, to get position of body as seen from Earth's center.\n ltime = time\n for iter in range(10):\n h = HelioVector(body, ltime)\n if aberration:\n # Include aberration, so make a good first-order approximation\n # by backdating the Earth's position also.\n # This is confusing, but it works for objects within the Solar System\n # because the distance the Earth moves in that small amount of light\n # travel time (a few minutes to a few hours) is well approximated\n # by a line segment that substends the angle seen from the remote\n # body viewing Earth. That angle is pretty close to the aberration\n # angle of the moving Earth viewing the remote body.\n # In other words, both of the following approximate the aberration angle:\n # (transverse distance Earth moves) / (distance to body)\n # (transverse speed of Earth) / (speed of light).\n earth = _CalcEarth(ltime)\n\n geo = Vector(h.x-earth.x, h.y-earth.y, h.z-earth.z, time)\n ltime2 = time.AddDays(-geo.Length() / C_AUDAY)\n dt = abs(ltime2.tt - ltime.tt)\n if dt < 1.0e-9:\n return geo\n\n ltime = ltime2\n\n raise Error('Light-travel time solver did not converge: dt={}'.format(dt))"
] | [
"0.60584325",
"0.5861145",
"0.56984997",
"0.5582579",
"0.5571899",
"0.5571185",
"0.55394447",
"0.55196565",
"0.5519342",
"0.5471972",
"0.54458904",
"0.5429235",
"0.5407269",
"0.5400973",
"0.53884035",
"0.5383207",
"0.535556",
"0.5349774",
"0.53446734",
"0.5344",
"0.5325683",
"0.5319006",
"0.5313806",
"0.53045964",
"0.5297905",
"0.52909774",
"0.5290813",
"0.52893394",
"0.52773994",
"0.52585685"
] | 0.6602906 | 0 |
Turns bytes into unicode, if needed. Uses UTF8. | def asunicode(s):
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
else:
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_unicode(data):\n if isinstance(data, bytes):\n return data.decode('utf-8')\n else:\n return data",
"def utf8tounicode(arg):\n\n try:\n if isinstance(arg, unicode):\n return arg.decode('utf-8')\n except NameError:\n pass # Python 3\n return arg",
"def _force_unicode(data):\n try:\n data = unicode(data, \"utf-8\")\n except UnicodeDecodeError:\n data = unicode(data, \"latin1\")\n return data",
"def to_unicode(value):\r\n if isinstance(value, _TO_UNICODE_TYPES):\r\n return value\r\n if not isinstance(value, bytes_type):\r\n raise TypeError(\r\n \"Expected bytes, unicode, or None; got %r\" % type(value)\r\n )\r\n return value.decode(\"utf-8\")",
"def to_unicode(s, encoding=\"utf-8\"):\n if isinstance(s, six.text_type):\n return s\n elif isinstance(s, bytes):\n return s.decode(encoding)\n # TODO: warning? Exception?\n return s",
"def cast_unicode(s, encoding='utf-8'):\n if isinstance(s, bytes) and not PY3:\n return s.decode(encoding, \"replace\")\n return s",
"def ensure_unicode(data, encoding=\"utf8\"):\n if isinstance(data, bytes):\n return data.decode(encoding)\n else:\n return unicode_type(data)",
"def unicode2utf8(s):\n return s.encode(encoding='utf-8', errors='ignore')",
"def to_unicode(text, encoding='utf-8', errors='strict'):\r\n if isinstance(text, bytes):\r\n return text.decode(encoding, errors=errors)\r\n return text",
"def to_unicode(x):\n try: # This may never fail, but let's be safe\n encoding = locale.getpreferredencoding()\n except:\n encoding = 'utf-8'\n ret = x.decode(encoding, 'replace').encode('utf-8')\n return ret",
"def unicode2utf8(arg):\n\n try:\n if isinstance(arg, unicode):\n return arg.encode('utf-8')\n except NameError:\n pass # Python 3\n return arg",
"def bytes_to_unicode():\r\n _chr = unichr if sys.version_info[0] == 2 else chr\r\n bs = list(range(ord(\"!\"), ord(\"~\")+1))+list(range(ord(\"¡\"), ord(\"¬\")+1))+list(range(ord(\"®\"), ord(\"ÿ\")+1))\r\n cs = bs[:]\r\n n = 0\r\n for b in range(2**8):\r\n if b not in bs:\r\n bs.append(b)\r\n cs.append(2**8+n)\r\n n += 1\r\n cs = [_chr(n) for n in cs]\r\n return dict(zip(bs, cs))",
"def to_unicode(string):\n assert isinstance(string, basestring)\n if sys.version_info[0] >= 3:\n if isinstance(string, bytes):\n return string.decode('utf-8')\n else:\n return string\n else:\n if isinstance(string, str):\n return string.decode('utf-8')\n else:\n return string",
"def _as_unicode(s):\n if isinstance(s, str):\n return s\n # Assume it is a bytes string\n # Note ISO-8859-1 aka Latin-1 preserves first 256 chars\n return codecs.latin_1_decode(s)[0]",
"def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\n \"Unsupported string type: %s\" % (type(text))\n ) # pragma: no cover",
"def convertFromUnicode(content):\n return content",
"def force_unicode(s):\n return (s.decode('utf8')\n if isinstance(s, str)\n else unicode(s))",
"def _to_unicode(text):\n # both str and unicode inherit from basestring\n if not isinstance(text, basestring):\n tmpl = 'expected UTF-8 encoded string or unicode, got %s value %s'\n raise TypeError(tmpl % (type(text), text))\n # return unicode strings unchanged\n if isinstance(text, unicode):\n return text\n # otherwise assume UTF-8 encoding, which also works for ASCII\n return unicode(text, 'utf-8')",
"def bytes_to_unicode():\n _chr = chr\n bs = list(range(ord(\"!\"), ord(\"~\") + 1)) + list(\n range(ord(\"¡\"), ord(\"¬\") + 1)) + list(range(ord(\"®\"), ord(\"ÿ\") + 1))\n cs = bs[:]\n n = 0\n for b in range(2 ** 8):\n if b not in bs:\n bs.append(b)\n cs.append(2 ** 8 + n)\n n += 1\n cs = [_chr(n) for n in cs]\n return dict(zip(bs, cs))",
"def make_unicode(string):\n if sys.version < '3' and isinstance(string, str):\n return unicode(string.decode('utf-8'))\n\n return string",
"def decode_to_unicode(content):\n if content:\n try:\n # Try to decode ISO-8859-1 to unicode\n return content.decode(\"ISO-8859-1\")\n except UnicodeEncodeError:\n # Assume content is unicode already\n return content",
"def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")",
"def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")",
"def _unicode(arr):\n try:\n return unicode(arr)\n except UnicodeEncodeError:\n dt = arr.dtype.newbyteorder('S')\n return unicode(arr.view(dt))",
"def ToUnicode(val):\n if sys.version_info[0] >= 3:\n return val\n return val if isinstance(val, unicode) else val.decode('utf-8')",
"def utf8(value):\n if isinstance(value, (bytes, type(None))):\n return value\n if not isinstance(value, unicode_type):\n raise TypeError(\n \"Expected bytes, unicode, or None; got %r\" % type(value)\n )\n return value.encode(\"utf-8\")",
"def convert_to_unicode(text):\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s, %s\" % (type(text), text))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")",
"def _convert_string_to_unicode(string):\n result = string\n\n try:\n if string is not None and not isinstance(string, six.text_type):\n result = string.decode(\"utf-8\")\n except (TypeError, UnicodeDecodeError, AttributeError):\n # Sometimes the string actually is binary or StringIO object,\n # so if you can't decode it, just give up.\n pass\n\n return result",
"def force_utf8(text):\n if isinstance(text, binary_type):\n return text\n else:\n return text.encode('utf-8')",
"def FromUnicode(val):\n if sys.version_info[0] >= 3:\n return val\n return val if isinstance(val, str) else val.encode('utf-8')"
] | [
"0.7771352",
"0.7608568",
"0.760092",
"0.7586902",
"0.75841767",
"0.75071144",
"0.7426475",
"0.7411024",
"0.73730016",
"0.7314761",
"0.7290108",
"0.723653",
"0.7233475",
"0.72172374",
"0.7215756",
"0.7209994",
"0.719679",
"0.7185164",
"0.71597666",
"0.71575105",
"0.7137322",
"0.7117638",
"0.7117638",
"0.70972735",
"0.70614606",
"0.7058952",
"0.70483774",
"0.70379806",
"0.7030333",
"0.70287263"
] | 0.7844327 | 0 |
load_ticker Retrieves market data from external data source (in this case Bloomberg) | def load_ticker(self, time_series_request):
time_series_request_vendor = self.construct_vendor_time_series_request(time_series_request)
data_frame = None
self.logger.info("Request Bloomberg data")
# do we need daily or intraday data?
if (time_series_request.freq in ['daily', 'weekly', 'monthly', 'quarterly', 'yearly']):
# for events times/dates separately needs ReferenceDataRequest (when specified)
if 'release-date-time-full' in time_series_request.fields:
# experimental
datetime_data_frame = self.get_reference_data(time_series_request_vendor, time_series_request)
# remove fields 'release-date-time-full' from our request (and the associated field in the vendor)
index = time_series_request.fields.index('release-date-time-full')
time_series_request_vendor.fields.pop(index)
time_series_request.fields.pop(index)
# download all the other event fields (uses HistoricalDataRequest to Bloomberg)
# concatenate with date time fields
if len(time_series_request_vendor.fields) > 0:
events_data_frame = self.get_daily_data(time_series_request, time_series_request_vendor)
col = events_data_frame.index.name
events_data_frame = events_data_frame.reset_index(drop = False)
data_frame = pandas.concat([events_data_frame, datetime_data_frame], axis = 1)
temp = data_frame[col]
del data_frame[col]
data_frame.index = temp
else:
data_frame = datetime_data_frame
# for all other daily/monthly/quarter data, we can use HistoricalDataRequest to Bloomberg
else:
data_frame = self.get_daily_data(time_series_request, time_series_request_vendor)
# assume one ticker only
# for intraday data we use IntradayDataRequest to Bloomberg
if (time_series_request.freq in ['tick', 'intraday', 'second', 'minute', 'hourly']):
time_series_request_vendor.tickers = time_series_request_vendor.tickers[0]
if time_series_request.freq in ['tick', 'second']:
data_frame = self.download_tick(time_series_request_vendor)
else:
data_frame = self.download_intraday(time_series_request_vendor)
if data_frame is not None:
if data_frame.empty:
self.logger.info("No tickers returned for: " + time_series_request_vendor.tickers)
return None
cols = data_frame.columns.values
data_frame = data_frame.tz_localize('UTC')
cols = time_series_request.tickers[0] + "." + cols
data_frame.columns = cols
self.logger.info("Completed request from Bloomberg.")
return data_frame | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_data(self):\n try:\n df = self.live_quote_arg_func(self.tickers)\n for index, ticker in enumerate(self.tickers):\n ticker_info = df.loc[index]\n self.ticker_dict[ticker].append(ticker_info['price'],\n ticker_info['volume'],\n ticker_info['amount'],\n ticker_info['time'])\n except Exception:\n raise ValueError('Polling thread exception')",
"def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df",
"def gettickerdata(tickername):\n\n r = requests.get(constants.bloomurl + getticker(tickername) + ':US')\n soup = BeautifulSoup(r.text, 'html.parser')\n results = soup.find_all('div', class_=\"price\")\n return (\"$\" + results[0].text)",
"async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n ticker = await self.publicGetTickerSymbol(self.extend(request, params))\n return self.parse_ticker(ticker, market)",
"def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()",
"def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()",
"def get_btc_ticker(self):\n return self.execute_http_call(\"/api/ticker\", \"GET\", headers=None)",
"def get_data_logic():\r\n global input_exchange\r\n global input_symbols\r\n global all_symbols\r\n global input_timeframe\r\n\r\n # create exchange connection\r\n exchange = Exchange(input_exchange)\r\n\r\n # perform check that exchange can grab price data\r\n if exchange.connection.has['fetchOHLCV']:\r\n\r\n # user ticked 'All Symbols?', so includes all symbols in\r\n # exchange_tickers.py for the particular exchange\r\n if all_symbols:\r\n symbol_list = SymbolList(symbols='auto', exchange=exchange)\r\n # user didn't tick 'All Symbols?', so create unpopulated symbol list\r\n else:\r\n symbol_list = SymbolList(exchange=exchange)\r\n # add all symbols user inputted\r\n for s in input_symbols:\r\n symbol_list.input_symbol(s)\r\n\r\n # get auto timeframe and check it is valid\r\n timeframe = Timeframe(timeframe=input_timeframe, exchange=exchange)\r\n while not timeframe.check_timeframe():\r\n timeframe.input_timeframe() # default to asking for input\r\n\r\n print(f\"Pulling data on the {timeframe.tf} timeframe for...\")\r\n print(symbol_list.symbols)\r\n\r\n # get current time in UTC in milliseconds\r\n now = datetime.now().astimezone(pytz.timezone('UTC'))\r\n now = int(now.timestamp()*1000)\r\n\r\n # loop over each symbol and pull new data\r\n for sym in symbol_list.symbols:\r\n # create csv filename and path\r\n file_sym = sym.replace('/', '')\r\n file_sym = file_sym.replace('-', '')\r\n filename = f\"{exchange.name}_{file_sym}_{timeframe.tf}.csv\" # generate filename from given information\r\n csv_path = f\"{exchange.name}/{timeframe.tf}/{filename}\"\r\n\r\n # get most recent price data and append it to existing data\r\n # (if it exists)\r\n price_data = PriceData(exchange=exchange, tf=timeframe.tf,\r\n sym=sym, now=now, path=csv_path)\r\n\r\n # check if price data csv already exists\r\n if price_data.exists():\r\n price_data.get_current()\r\n # get new data as far back as possible if csv does not exist\r\n else:\r\n price_data.get_new()\r\n\r\n # keep updating price_data until current time\r\n price_data.update()\r\n\r\n # write to csv\r\n price_data.write()\r\n\r\n print(\"Finished writing files!\")",
"def ticker(self, param=None):\r\n\t\tdata = self._get('ticker/', query=param)\r\n\t\treturn data",
"async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n response = await self.publicGetInstrument(self.extend(request, params))\n ticker = self.safe_value(response, 0)\n if ticker is None:\n raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')\n return self.parse_ticker(ticker, market)",
"def get_data(retrieve = False, start='2019-01-01', comp = False):\r\n if retrieve == True:\r\n tickers = retrieve_sp500()\r\n else:\r\n with open('sp500_tickers.pickle', 'rb') as file:\r\n tickers = pickle.load(file)\r\n if not os.path.exists('sp500_data'):\r\n os.mkdir('sp500_data')\r\n exchg_close = dt.time(16,0,0,0)\r\n # use todays date if markets have closed.\r\n if dt.datetime.today().time() > exchg_close:\r\n end = dt.datetime.now()\r\n # use yesterdays dates if markets have not yet closed.\r\n else: \r\n end = dt.datetime.now() - dt.timedelta(1)\r\n for ticker in tickers:\r\n # updates data for tickers not currently stored.\r\n if not os.path.exists('sp500_data/{}.csv'.format(ticker)):\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # updates data for tickers that have not been updated today.\r\n elif dt.datetime.fromtimestamp(os.path.getmtime('sp500_data/{}.csv'.format(ticker))).day != dt.datetime.today().day:\r\n df = pdr.get_data_yahoo(ticker, start, end)\r\n df.to_csv('sp500_data/{}.csv'.format(ticker))\r\n # prints out data that was not and does not need udpating.\r\n else:\r\n print('{} is already saved'.format(ticker))\r\n if comp == True:\r\n compile_data()",
"def scraper_bloomberg() -> None:\n\thtmltext = urllib.urlopen('http://www.bloomberg.com/markets/watchlist/recent-ticker/AAPL:US')\n\tdata = json.load(htmltext)\n\n\tprint data[\"last_price\"]\n\n\treturn None",
"def remote_load_price(symbol):\n URL = \"http://finance.yahoo.com/webservice/v1/symbols/{}/quote?format=json&view=detail\"\n try:\n jsonObj = json.loads(request.urlopen(URL.format(symbol)).read().decode(\"UTF-8\"))['list']['resources'][0]['resource']['fields']\n except IndexError:\n raise RuntimeError(\"The stock with symbol {} can't be found!\".format(symbol))\n return RemoteStockData(jsonObj[\"symbol\"], jsonObj[\"name\"], jsonObj[\"price\"], jsonObj[\"change\"])",
"def get_data(ticker, tickers):\n \n print(ticker)\n ## Date setting\n today = datetime.today()\n days_ago_90 = today - timedelta(days = 90)\n today = today.strftime(\"%Y-%m-%d\")\n days_ago_90 = days_ago_90.strftime(\"%Y-%m-%d\")\n \n df_ticker = web.DataReader(ticker, 'yahoo', start = days_ago_90, end = today)\n \n ## To get prices, iloc is used. It's because shifting by timedetlas will result in error in cases where some holidays occured \n price_most_recent = df_ticker.iloc[-1, 5]\n price_7_days_ago = df_ticker.iloc[-7, 5]\n price_21_days_ago = df_ticker.iloc[-21, 5]\n price_30_days_ago = df_ticker.iloc[-30, 5]\n price_90_days_ago = df_ticker.iloc[0,5]\n \n ## Getting price change\n price_change_7_days = price_change(price_most_recent, price_7_days_ago)\n price_change_21_days = price_change(price_most_recent, price_21_days_ago)\n price_change_30_days = price_change(price_most_recent, price_30_days_ago)\n price_change_90_days = price_change(price_most_recent, price_90_days_ago)\n \n ## Checking for constant price drop\n constant_price_drop_7 = constant_price_drop_detector(df_ticker, 7)\n ## Only if price drops constantly for 7 days it makes sense to check for this pattern in 21 days period\n if constant_price_drop_7 == \"YES\":\n constant_price_drop_21 = constant_price_drop_detector(df_ticker, 21)\n else:\n constant_price_drop_21 = \"NO\"\n \n ## Now creating the final df to return\n df_prices = df_ticker[['Adj Close']].T\n df_prices.index = [ticker]\n df_prices.reset_index(inplace = True)\n \n full_name = tickers.loc[tickers[\"Ticker\"] == ticker, 'Full Name'].values[0]\n df_prices['company_name'] = full_name\n df_prices['price_90_days_ago'] = price_90_days_ago\n df_prices['price_30_days_ago'] = price_30_days_ago\n df_prices['price_21_days_ago'] = price_21_days_ago\n df_prices['price_7_days_ago'] = price_7_days_ago\n df_prices['price_most_recent'] = price_most_recent\n \n df_prices['price_change_7_days'] = price_change_7_days\n df_prices['price_change_21_days'] = price_change_21_days\n df_prices['price_change_30_days'] = price_change_30_days\n df_prices['price_change_90_days'] = price_change_90_days\n \n df_prices['constant_price_drop_7'] = constant_price_drop_7\n df_prices['constant_price_drop_21'] = constant_price_drop_21\n \n df_prices.fillna(\"None\", inplace = True)\n \n return df_prices",
"def get_data(ticker, interval, start_date, end_date):\r\n # Display indication\r\n print('[INFO] {} - Retrieving {}_{} historical data'.format(get_now(), ticker, interval))\r\n # Download ticker's ohlcv\r\n ohlcv = yf.download(tickers=ticker, start=start_date, end=end_date, interval=interval)\r\n # Modify dataframe\r\n ohlcv.drop(columns=['Adj Close'], inplace=True)\r\n ohlcv.sort_index(axis=0, ascending=False, inplace=True)\r\n ohlcv.reset_index(inplace=True)\r\n if \"Datetime\" in ohlcv.columns:\r\n ohlcv['Datetime'] = ohlcv['Datetime'].astype(str).str[:-9]\r\n return ohlcv",
"def get_ticker(self, ticker_symbol):\n return",
"def book_ticker(self, symbol=''):\n params = {\n 'symbol': symbol,\n }\n return self._quote_get('ticker/bookTicker', params=params)",
"def fetch_ticker(self, symbol: str, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = self.publicGetExchangesPairTicker(self.extend(request, params))\n return self.parse_ticker(response, market)",
"async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = await self.publicGetPairTicker(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n return self.parse_ticker(data, market)",
"def get_ticker(self, ticker=\"ACN\", refresh=False):\n \n # Build financials\n logger.debug(\"Request for {0} forcing_refresh {1}\".format(ticker, refresh))\n # financial = {}\n\n financial = None\n stockdate = None\n ratios = None\n alt_ticker = None\n\n try:\n financial_item = self._get_financial(ticker, refresh)\n financial = financial_item.dump()\n \n today = datetime.today().date()\n if financial_item.updated.year < today.year or financial_item.updated.month < today.month:\n refresh = True\n # Get Key Ratios (incl health)\n logger.debug(\n f\"This is the financial {financial} REFRESH {refresh}\"\n )\n \n ratios = [val.dump() for val in self._get_key_ratios(ticker, refresh)]\n \n # Get Valuation\n valuations = [val.dump() for val in self._get_valuation_history(ticker, refresh)]\n logger.debug(valuations)\n \n\n financial[\"ratios\"] = ratios\n financial[\"valuations\"] = valuations\n \n # TODO: Get Dividend history\n dividend_history = [val.dump() for val in self._get_dividend_history(ticker, refresh)]\n \n if (refresh):\n financial_item.updated = today\n db.session.commit()\n \n # Assemble stock data\n stockdata = {\n \"symbol\": ticker,\n \"name\": financial[\"company_name\"],\n \"financials\": financial,\n \"dividend_history\": dividend_history,\n }\n \n return jsonify(stockdata)\n \n except Exception as e:\n logger.exception(\n \"Failed to retrieve data for {ticker}\".format(ticker=ticker)\n )\n return \"Not found\", 404",
"def get_tickers():\n\turl = \"https://api.iextrading.com/1.0/ref-data/symbols\"\n\t\n\ttry:\n\t\tresponse = requests.get(url)\n\t\tif str(response.status_code) == \"200\":\n\t\t\tprint(\"[UPDATE]: Downlaoding Tickers from iextrading API\")\n\t\t\tjson_stock_data = response.json()\n\n\t\t\tpd_stock = pandas.DataFrame(json_stock_data)\n\t\t\t# DataFrame Format\n\t\t\t# date iexId isEnabled name symbol type\n\t\t\t# 0 2019-02-12 2 True Agilent Technologies Inc. A cs\n\n\t\t\tprint(\"[SUCCESS]: Downloaded {} symbols from IEX.\".format(len(pd_stock.index)))\n\n\t\t\treturn pd_stock\n\n\t\telse:\n\t\t\tprint(\"[ERROR]: Download from IEX failed.\")\n\t\t\treturn \"ERROR\"\n\texcept Exception as e:\n\t\tprint(\"[ERROR]: {}\".format(e))\n\t\treturn \"ERROR\"",
"def fetchOHLC(ticker,interval,duration):\r\n instrument = instrumentLookup(instrument_df,ticker)\r\n data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.set_index(\"date\",inplace=True)\r\n return data",
"def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks",
"async def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):\n await self.load_markets()\n symbols = self.market_symbols(symbols)\n request = {}\n if symbols is not None:\n ids = self.market_ids(symbols)\n request['symbols'] = ','.join(ids)\n else:\n request['symbols'] = 'ALL'\n tickers = await self.publicGetTickers(self.extend(request, params))\n #\n # [\n # # on trading pairs(ex. tBTCUSD)\n # [\n # SYMBOL,\n # BID,\n # BID_SIZE,\n # ASK,\n # ASK_SIZE,\n # DAILY_CHANGE,\n # DAILY_CHANGE_RELATIVE,\n # LAST_PRICE,\n # VOLUME,\n # HIGH,\n # LOW\n # ],\n # # on funding currencies(ex. fUSD)\n # [\n # SYMBOL,\n # FRR,\n # BID,\n # BID_PERIOD,\n # BID_SIZE,\n # ASK,\n # ASK_PERIOD,\n # ASK_SIZE,\n # DAILY_CHANGE,\n # DAILY_CHANGE_RELATIVE,\n # LAST_PRICE,\n # VOLUME,\n # HIGH,\n # LOW,\n # _PLACEHOLDER,\n # _PLACEHOLDER,\n # FRR_AMOUNT_AVAILABLE\n # ],\n # ...\n # ]\n #\n result = {}\n for i in range(0, len(tickers)):\n ticker = tickers[i]\n marketId = self.safe_string(ticker, 0)\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = self.parse_ticker(ticker, market)\n return self.filter_by_array(result, 'symbol', symbols)",
"def LoadingData(self, ticker, FullHistory=False):\r\n if FullHistory == False:\r\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&apikey={}\"\r\n else:\r\n url = \"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol={}&outputsize=full&apikey={}\"\r\n\r\n try:\r\n response = requests.get(url.format(ticker, self.key))\r\n response.raise_for_status()\r\n except requests.exceptions.RequestException as e:\r\n raise SystemExit(e)\r\n\r\n # The API returns 200 status even after you have a typo\r\n try:\r\n outputjson = response.json()['Time Series (Daily)']\r\n except:\r\n print(\"Please check ticker for typos or mismatches\")\r\n outputjson = None\r\n\r\n return outputjson, ticker",
"def bloomberg(site):\n uri = \"https://www.bloomberg.com/markets/api/bulk-time-series/price/\"\n endpoint = (\n \"USDCNY%3ACUR,USDRUB%3ACUR,USDJPY%3ACUR,USDEUR%3ACUR,USDKRW%3ACUR\"\n + \",XAUUSD%3ACUR,XAGUSD%3ACUR\"\n )\n url = uri + endpoint\n headers = {\n \"authority\": \"www.bloomberg.com\",\n \"method\": \"GET\",\n \"path\": (\n \"/markets/api/comparison/data?securities=\"\n + \"USDCNY%3ACUR,USDRUB%3ACUR,USDJPY%3ACUR,USDEUR%3ACUR,USDKRW%3ACUR\"\n + \",XAUUSD%3ACUR,XAGUSD%3ACUR\"\n + \"&securityType=CURRENCY&locale=en\"\n ),\n \"scheme\": \"https\",\n \"accept\": (\n \"text/html,application/xhtml+xml,application/xml;q=0.9,image/\"\n + \"webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n ),\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"cookie\": (\n \"bbAbVisits=1; _pxhd=e24b47c64d37711c147cfb3c4b35c845563d2f9831b\"\n + \"03d9189f8cd761bc2be4f:d78eeb01-34c9-11ea-8f86-51d2aad9afb3; _px\"\n + \"vid=d78eeb01-34c9-11ea-8f86-51d2aad9afb3; _reg-csrf=s%3Ab0pWvbcs\"\n + \"UtrjYeJ0T2GrTaaD.8kaQlvHchJ1D%2FZZMaQWQiTizJTxrqqyzzuEZHEvlQNw;\"\n + \" agent_id=7989385a-d6d9-4446-b7aa-3c937407862b;\"\n + \" session_id=5702901e-d5fe-41e7-b259-df46322015e0;\"\n + \" session_key=3179869387f4c4ec4385e0d16222f0e59f48c47f;\"\n + \" _user-status=anonymous; _is-ip-whitelisted=false;\"\n + \" _user-ip=91.132.137.116; trc_cookie_storage=taboola%2520global%253\"\n + \"Auser-id%3D2f4acdc6-7c3c-412c-8766-d9c80dcffc38-tuct513df3e;\"\n + \" bdfpc=004.0586371899.1578785723722;\"\n + \" _reg-csrf-token=4ZxUa9q8-fkNXQkoHHXhnobWne1sDlIVcKEQ\"\n ),\n \"dnt\": \"1\",\n \"if-none-match\": 'W/\"lZU52eQYxjadyNKGCyftEg==\"',\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\"\n + \" (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27\"\n ),\n }\n try:\n session = requests.Session()\n session.headers = headers\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()\n data = {}\n for item in ret:\n symbol = item[\"id\"].replace(\":CUR\", \"\")\n symbol = symbol[:3] + \":\" + symbol[-3:]\n data[symbol] = float(item[\"lastPrice\"])\n data[\"USD:XAG\"] = 1 / data.pop(\"XAG:USD\")\n data[\"USD:XAU\"] = 1 / data.pop(\"XAU:USD\")\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")",
"def load_pickled_price_history(self, ticker: str) -> pd.DataFrame:\n try:\n price_history = pd.read_pickle(f\"../data/pickles/{ticker}.pkl\")\n return price_history\n except FileNotFoundError:\n print(f\"no pickle available for {ticker}; falling back to DB\")\n return None",
"def fetchOHLC(ticker,interval,duration):\n instrument = instrumentLookup(instrument_df,ticker)\n data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\n data.set_index(\"date\",inplace=True)\n return data",
"def get_data_for_ticker(ticker):\n logger.debug(f'processing get_data_for_ticker({ticker})')\n df_data = get_existing_data_for_ticker(ticker)\n start_date, end_date = get_ticker_start_and_end_dates(df_data)\n logger.debug(f'retrieving for {ticker} from {start_date} to {end_date}')\n df_new_data = pd.DataFrame()\n if start_date != end_date:\n df_new_data = ping_yahoo_for_ticker(ticker, start_date, end_date)\n if df_data.empty:\n df_data = df_new_data\n else:\n df_data = df_data.append(df_new_data)\n return df_data",
"def fetchOHLC(ticker,interval = \"minute\",duration=4):\r\n data = pd.DataFrame(kite.historical_data(ticker,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.date =data.date.map(lambda t: t.strftime('%Y-%m-%d %H:%M'))\r\n return data"
] | [
"0.7115917",
"0.6569476",
"0.64997846",
"0.641512",
"0.63950694",
"0.63743776",
"0.63675433",
"0.6357785",
"0.6351763",
"0.6344594",
"0.634049",
"0.6279552",
"0.62463516",
"0.6239634",
"0.6233006",
"0.61905515",
"0.6186492",
"0.6181986",
"0.6160244",
"0.61591166",
"0.60775226",
"0.6067212",
"0.6061159",
"0.60507596",
"0.6037849",
"0.6030695",
"0.60294217",
"0.6021052",
"0.5991321",
"0.59878147"
] | 0.6994034 | 1 |
Applies a substitution cypher done by the rotor from right to left input_letter > integer that represents the letter rotor > rotor as a list of integers | def _rotor_right2left(rotor, input_letter, offset, ring):
alpha_size = len(ALPHABET)
return (rotor[(input_letter + offset - ring) % alpha_size] - offset +\
ring) % alpha_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _rotor_left2right(rotor, input_letter, offset, ring):\n\t\tletter = (input_letter + offset - ring) % len(ALPHABET)\n\t\treturn (rotor.index(letter) - offset + ring) % len(ALPHABET)",
"def scramble(r_letters, s_letters):\r\n if len(r_letters) == 0:\r\n # Base case: All letters used\r\n print(s_letters)\r\n else:\r\n # Recursive case: For each call to scramble()\r\n # move a letter from remaining to scrambled\r\n for i in range(len(r_letters)):\r\n # The letter at index i will be scrambled\r\n scramble_letter = r_letters[i]\r\n \r\n # Remove letter to scramble from remaining letters list\r\n remaining_letters = r_letters[:i] + r_letters[i+1:]\r\n \r\n # Scramble letter\r\n scramble(remaining_letters, s_letters + scramble_letter)",
"def rotate_character(char, rot):\n if type(char) != type(''):\n return char\n if type(rot) != type(1):\n return char\n \n if len(char) != 1:\n return char\n if not char.isalpha():\n return char\n letters = lowerLetters\n if char.isupper():\n letters = upperLetters\n \n pos = letters.find(char)\n pos += rot\n pos = pos % 26\n \n return letters[pos]",
"def _forward(self, letter):\n\t\tself._turn_rotors()\n\t\tl = letter\n\t\tfor i in range(-1, -self.n_rotors - 1, -1):\n\t\t\tl = self._rotor_right2left(self.rotors[i], l, self.offsets[i],\n\t\t\t\t\t\t\t\t\tself.rings[i])\n\t\treturn l",
"def adjustAlphabetByPosition(self):\n for i in range(0, self._position): #Rotates rotors until it reaches the input position without stepping or incrementing the position attribute\n self.adjustRotor()",
"def rot_encode(shift, txt):\n # list of the alphabet accounting for both upper and lower case. \n # ignore symbols.\n\n alpha = list('abcdefghijklmnopqrstuvwxyz')\n Alpha=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n \n # create list to save encoded characters\n new_txt=[]\n\n for ch in txt:\n\n # check character against alphabets\n if ch in alpha:\n new_txt.append(alpha[(alpha.index(ch) + shift) % 26])\n elif ch in Alpha:\n new_txt.append(Alpha[(Alpha.index(ch) + shift) % 26])\n \n else:\n # if not a character just append the symbol\n new_txt.append(ch)\n\n # return as string\n return ''.join(new_txt)",
"def encryptionMultipleRotate(text, power):\n s = text\n transformedChar = \"\"\n transformedChar = s[-power:] + s[:-(power)]\n\n print(\"Multiple Rotation Encrypted text : \" )\n return transformedChar",
"def place_in_alphabet(letters):\r\n\tfor l in letters:\r\n\t\tprint(l, ':', str(ALPHABET.index(l)+1))",
"def adjustRotor(self):\n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26 #Loops the alphabet from Z (26th letter) back to A (1st letter)\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def incrementRotor(self):\n self._position += 1\n self._position = self._position % 26 \n \n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def rotate(text, rot):\n ret = []\n for char in text:\n ret.append(convert(char, rot))\n return ''.join(ret)",
"def compass(sequence):\n\n temp_seq = list(sequence)\n result = \"\"\n circular_queue = deque(['u', 'r', 'd', 'l'], maxlen=4)\n char2idx = {'u':0, 'r':1, 'd':2, 'l':3}\n for i in range(len(temp_seq)):\n # get chars\n current_char = temp_seq[i]\n if i != 0:\n prev_char = temp_seq[i-1]\n else:\n prev_char = ''\n\n # insert char\n result += current_char\n\n # if chars are equal no rotation needed\n if prev_char == current_char:\n continue\n\n # update compass\n if current_char == 'r':\n circular_queue.rotate(1)\n elif current_char == 'l':\n circular_queue.rotate(-1)\n elif current_char == 'd':\n circular_queue.rotate(-2)\n elif current_char == 'T': \n circular_queue.rotate(-2)\n \n # update temp_seq\n for j, char in enumerate(sequence):\n if char == 'T':\n continue\n idx = char2idx[char]\n new_char = circular_queue[idx]\n temp_seq[j] = new_char\n \n return result",
"def encode():\n result = \"\"\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n shift = int(input(\"select a number to encode your message\"))\n inverted = alphabet[shift:] + alphabet[:shift]\n message = input(\"please enter a message to encode\")\n for y in message:\n result += inverted[alphabet.index(y)]\n print(result)",
"def rotate(word_to_rotate,how_much):\n\thow_much-=26*(how_much/26)\t\n\t\n\tlength=len(word_to_rotate)\n\tk=0\n\tword=[0]*len(word_to_rotate)\n\twhile k < length:\n\t\tm_ord=ord(word_to_rotate[k])+how_much\n\t\tif m_ord>122:\n\t\t\tm_ord-=26\n\n\t\tm_chr=chr(m_ord)\n\t\tword[k]=m_chr\t\t\n\t\t#print m,\n\t\tk=k+1\n\tword2=''.join(word)\n\treturn word2",
"def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna",
"def encryptionMultipleShift(text, index, power):\n s=text\n transformedChar=\"\"\n\n transformedChar = ord(s[index]) + (power % 26)\n if (transformedChar >= 90):\n transformedChar = chr(64 + (transformedChar - 90))\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Multiple Shift Encrypted text : \" )\n return s[:index] + transformedChar + s[index+1:]",
"def translate(l, a, c):\n try:\n i = int(l[0])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[1].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n try:\n i = int(l[2])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[3].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n return True",
"def _get_rot_letter(self, letter, rot, operation, alphabet):\n if len(letter) != 1:\n raise ValueError(\"'letter' deve ter length 1.\")\n\n if letter not in alphabet:\n letter_new = letter\n\n else:\n letter_pos = alphabet.index(letter)\n letter_new = alphabet[operation(letter_pos, rot) % len(alphabet)]\n \n return letter_new",
"def buildCoder(shift):\n alphabet = string.ascii_lowercase \n alphabet2 = string.ascii_uppercase \n \n \n #Create our substitution dictionary \n dic={} \n dic2={}\n for i in range(0,len(alphabet)): \n dic[alphabet[i]]=alphabet[(i+shift)%len(alphabet)]\n dic2[alphabet2[i]]=alphabet2[(i+shift)%len(alphabet2)]\n \n dic.update(dic2)\n \n return dic",
"def encryptAffine(letter, a, b):\n if(gcd(7, 26) != 1):\n return \"Error, not a bijection\"\n else:\n encrypted_letter = \"\"\n for i in range(len(letter)):\n if(ord(letter[i]) == 32):\n encrypted_letter += chr(ord(letter[i]))\n else:\n let = letter[i].lower()\n let = ord(let) - 97\n new_let = (((let* a) + b) % 26) + 97\n encrypted_letter += chr(new_let)\n return encrypted_letter",
"def rotate_letter(c, num):\n return chr(((ord(c) - 97) + num) % 26 + 97)",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n            empty_key()\r\n        elif message=='export_key':\r\n            return export_key()\r\n        elif message=='import_key':\r\n            import_key(var)\r\n        elif message=='encoding':\r\n            return encoding(var)\r\n        elif message=='decoding':\r\n            return decoding(var)\r\n        else:\r\n            print(\"Unknown message\") \r\n    return dispatch",
"def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar",
"def scramble(src):\n\n output = \"\"\n\n for each in src.lower():\n diff = ord(each) - ord('a')\n\n if diff >= 0 and diff < 26:\n output += chr(ord('a') + (25 - (ord(each) - ord('a'))))\n elif each >= '0' and each <= '9':\n output += each\n\n return output",
"def rotate_character(char, rot):\n if char.isalpha() == False:\n return char\n else:\n original_pos = alphabet_position(char)\n new_pos = original_pos + rot\n\n if new_pos >=26:\n new_pos = new_pos % 26\n\n alphabet='abcdefghijklmnopqrstuvwxyz'\n ALPHABET='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n if char.isupper() == True:\n new_char = ALPHABET[new_pos]\n else:\n new_char = alphabet[new_pos]\n \n return new_char",
"def rotate(s, index):\n # we'll have to split the list from index so that we can only rotate the charaters starting after index\n left = s[:index]\n right = s[index:]\n\n # shift each character to the left\n # \"abcd\" => \"bcda\"\n rotated = right[1:]\n #rotated.append(right[0])\n rotated += right[0]\n # merged the rotated and left to form a complete word\n s = left + rotated\n return s",
"def applyCoder(text, coder):\n ciphertext = str()\n #for each letter in the text find it, and grab shifted letter\n for letter in text:\n ciphertext += coder.get(letter, letter)\n return ciphertext",
"def decode():\n result = \"\"\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n shift = int(input(\"select a number to decode your message\"))\n inverted = alphabet[shift:] + alphabet[:shift]\n message = input(\"please enter a message to decode\")\n for y in message:\n result += alphabet[inverted.index(y)]\n print(result)",
"def rot13(string):\n i = 0 # initialize counter\n\n lyst = list(string) # transform string into list to allow item reassignment\n\n for letter in lyst:\n\n if letter.isalpha(): # only rotate alphabetic characters\n\n # rotation:\n a = ord(letter) + 13\n\n # management of revolving (if after rotation the letter goes \"beyond\" Z we make it wrap around to the\n # beginning)\n if (letter.isupper() and a > 90) or (letter.islower() and a > 122):\n a = a - 26\n\n # item reassignment\n lyst[i] = chr(a)\n\n # increment counter regardless of if branch execution\n i = i + 1\n\n string = ''.join(lyst) # reassign the variable string to its new rotated value\n return string"
] | [
"0.63908404",
"0.61391145",
"0.6079742",
"0.6025106",
"0.59956324",
"0.5960136",
"0.5949024",
"0.5942177",
"0.5936276",
"0.5896446",
"0.5841828",
"0.58261704",
"0.58245915",
"0.5823976",
"0.5818802",
"0.5812918",
"0.5809682",
"0.5798197",
"0.5780767",
"0.5763846",
"0.5762415",
"0.5761436",
"0.5750502",
"0.5743977",
"0.57415617",
"0.5709329",
"0.5675822",
"0.56729484",
"0.5669705",
"0.5621823"
] | 0.6469157 | 0 |
Applies a substitution cypher done by the rotor from left to right input_letter > integer that represents the letter rotor > rotor as a list of integers | def _rotor_left2right(rotor, input_letter, offset, ring):
letter = (input_letter + offset - ring) % len(ALPHABET)
return (rotor.index(letter) - offset + ring) % len(ALPHABET) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _rotor_right2left(rotor, input_letter, offset, ring):\n\t\talpha_size = len(ALPHABET)\n\t\treturn (rotor[(input_letter + offset - ring) % alpha_size] - offset +\\\n\t\t\t\t\tring) % alpha_size",
"def rotate_character(char, rot):\n if type(char) != type(''):\n return char\n if type(rot) != type(1):\n return char\n \n if len(char) != 1:\n return char\n if not char.isalpha():\n return char\n letters = lowerLetters\n if char.isupper():\n letters = upperLetters\n \n pos = letters.find(char)\n pos += rot\n pos = pos % 26\n \n return letters[pos]",
"def scramble(r_letters, s_letters):\r\n if len(r_letters) == 0:\r\n # Base case: All letters used\r\n print(s_letters)\r\n else:\r\n # Recursive case: For each call to scramble()\r\n # move a letter from remaining to scrambled\r\n for i in range(len(r_letters)):\r\n # The letter at index i will be scrambled\r\n scramble_letter = r_letters[i]\r\n \r\n # Remove letter to scramble from remaining letters list\r\n remaining_letters = r_letters[:i] + r_letters[i+1:]\r\n \r\n # Scramble letter\r\n scramble(remaining_letters, s_letters + scramble_letter)",
"def encryptionMultipleRotate(text, power):\n s = text\n transformedChar = \"\"\n transformedChar = s[-power:] + s[:-(power)]\n\n print(\"Multiple Rotation Encrypted text : \" )\n return transformedChar",
"def adjustAlphabetByPosition(self):\n for i in range(0, self._position): #Rotates rotors until it reaches the input position without stepping or incrementing the position attribute\n self.adjustRotor()",
"def rot_encode(shift, txt):\n # list of the alphabet accounting for both upper and lower case. \n # ignore symbols.\n\n alpha = list('abcdefghijklmnopqrstuvwxyz')\n Alpha=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n \n # create list to save encoded characters\n new_txt=[]\n\n for ch in txt:\n\n # check character against alphabets\n if ch in alpha:\n new_txt.append(alpha[(alpha.index(ch) + shift) % 26])\n elif ch in Alpha:\n new_txt.append(Alpha[(Alpha.index(ch) + shift) % 26])\n \n else:\n # if not a character just append the symbol\n new_txt.append(ch)\n\n # return as string\n return ''.join(new_txt)",
"def adjustRotor(self):\n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26 #Loops the alphabet from Z (26th letter) back to A (1st letter)\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def place_in_alphabet(letters):\r\n\tfor l in letters:\r\n\t\tprint(l, ':', str(ALPHABET.index(l)+1))",
"def _forward(self, letter):\n\t\tself._turn_rotors()\n\t\tl = letter\n\t\tfor i in range(-1, -self.n_rotors - 1, -1):\n\t\t\tl = self._rotor_right2left(self.rotors[i], l, self.offsets[i],\n\t\t\t\t\t\t\t\t\tself.rings[i])\n\t\treturn l",
"def incrementRotor(self):\n self._position += 1\n self._position = self._position % 26 \n \n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def compass(sequence):\n\n temp_seq = list(sequence)\n result = \"\"\n circular_queue = deque(['u', 'r', 'd', 'l'], maxlen=4)\n char2idx = {'u':0, 'r':1, 'd':2, 'l':3}\n for i in range(len(temp_seq)):\n # get chars\n current_char = temp_seq[i]\n if i != 0:\n prev_char = temp_seq[i-1]\n else:\n prev_char = ''\n\n # insert char\n result += current_char\n\n # if chars are equal no rotation needed\n if prev_char == current_char:\n continue\n\n # update compass\n if current_char == 'r':\n circular_queue.rotate(1)\n elif current_char == 'l':\n circular_queue.rotate(-1)\n elif current_char == 'd':\n circular_queue.rotate(-2)\n elif current_char == 'T': \n circular_queue.rotate(-2)\n \n # update temp_seq\n for j, char in enumerate(sequence):\n if char == 'T':\n continue\n idx = char2idx[char]\n new_char = circular_queue[idx]\n temp_seq[j] = new_char\n \n return result",
"def encryptAffine(letter, a, b):\n if(gcd(7, 26) != 1):\n return \"Error, not a bijection\"\n else:\n encrypted_letter = \"\"\n for i in range(len(letter)):\n if(ord(letter[i]) == 32):\n encrypted_letter += chr(ord(letter[i]))\n else:\n let = letter[i].lower()\n let = ord(let) - 97\n new_let = (((let* a) + b) % 26) + 97\n encrypted_letter += chr(new_let)\n return encrypted_letter",
"def rotate(text, rot):\n ret = []\n for char in text:\n ret.append(convert(char, rot))\n return ''.join(ret)",
"def encryptionMultipleShift(text, index, power):\n s=text\n transformedChar=\"\"\n\n transformedChar = ord(s[index]) + (power % 26)\n if (transformedChar >= 90):\n transformedChar = chr(64 + (transformedChar - 90))\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Multiple Shift Encrypted text : \" )\n return s[:index] + transformedChar + s[index+1:]",
"def rotate(word_to_rotate,how_much):\n\thow_much-=26*(how_much/26)\t\n\t\n\tlength=len(word_to_rotate)\n\tk=0\n\tword=[0]*len(word_to_rotate)\n\twhile k < length:\n\t\tm_ord=ord(word_to_rotate[k])+how_much\n\t\tif m_ord>122:\n\t\t\tm_ord-=26\n\n\t\tm_chr=chr(m_ord)\n\t\tword[k]=m_chr\t\t\n\t\t#print m,\n\t\tk=k+1\n\tword2=''.join(word)\n\treturn word2",
"def _get_rot_letter(self, letter, rot, operation, alphabet):\n if len(letter) != 1:\n raise ValueError(\"'letter' deve ter length 1.\")\n\n if letter not in alphabet:\n letter_new = letter\n\n else:\n letter_pos = alphabet.index(letter)\n letter_new = alphabet[operation(letter_pos, rot) % len(alphabet)]\n \n return letter_new",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n            empty_key()\r\n        elif message=='export_key':\r\n            return export_key()\r\n        elif message=='import_key':\r\n            import_key(var)\r\n        elif message=='encoding':\r\n            return encoding(var)\r\n        elif message=='decoding':\r\n            return decoding(var)\r\n        else:\r\n            print(\"Unknown message\") \r\n    return dispatch",
"def buildCoder(shift):\n alphabet = string.ascii_lowercase \n alphabet2 = string.ascii_uppercase \n \n \n #Create our substitution dictionary \n dic={} \n dic2={}\n for i in range(0,len(alphabet)): \n dic[alphabet[i]]=alphabet[(i+shift)%len(alphabet)]\n dic2[alphabet2[i]]=alphabet2[(i+shift)%len(alphabet2)]\n \n dic.update(dic2)\n \n return dic",
"def rotate_character(char, rot):\n if char.isalpha() == False:\n return char\n else:\n original_pos = alphabet_position(char)\n new_pos = original_pos + rot\n\n if new_pos >=26:\n new_pos = new_pos % 26\n\n alphabet='abcdefghijklmnopqrstuvwxyz'\n ALPHABET='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n if char.isupper() == True:\n new_char = ALPHABET[new_pos]\n else:\n new_char = alphabet[new_pos]\n \n return new_char",
"def rotate_letter(c, num):\n return chr(((ord(c) - 97) + num) % 26 + 97)",
"def translate(l, a, c):\n try:\n i = int(l[0])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[1].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n try:\n i = int(l[2])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[3].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n return True",
"def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2",
"def encode():\n result = \"\"\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n shift = int(input(\"select a number to encode your message\"))\n inverted = alphabet[shift:] + alphabet[:shift]\n message = input(\"please enter a message to encode\")\n for y in message:\n result += inverted[alphabet.index(y)]\n print(result)",
"def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna",
"def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar",
"def scramble(src):\n\n output = \"\"\n\n for each in src.lower():\n diff = ord(each) - ord('a')\n\n if diff >= 0 and diff < 26:\n output += chr(ord('a') + (25 - (ord(each) - ord('a'))))\n elif each >= '0' and each <= '9':\n output += each\n\n return output",
"def rotate(s, index):\n # we'll have to split the list from index so that we can only rotate the charaters starting after index\n left = s[:index]\n right = s[index:]\n\n # shift each character to the left\n # \"abcd\" => \"bcda\"\n rotated = right[1:]\n #rotated.append(right[0])\n rotated += right[0]\n # merged the rotated and left to form a complete word\n s = left + rotated\n return s",
"def encipher(S, n):\n new = ''\n for i in S:\n c = rot(i, n)\n new = new + c\n return new",
"def applyCoder(text, coder):\n ciphertext = str()\n #for each letter in the text find it, and grab shifted letter\n for letter in text:\n ciphertext += coder.get(letter, letter)\n return ciphertext",
"def rotate_character(char, rot):\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n if char.lower() not in alphabet:\n return char\n mod = (alphabet_position(char) + rot) % len(alphabet)\n if char in alphabet:\n newChar = chr(97 + mod)\n else:\n newChar = chr(65 + mod)\n return newChar"
] | [
"0.6404293",
"0.61947584",
"0.614188",
"0.60083866",
"0.6000814",
"0.59757674",
"0.59642",
"0.5956721",
"0.5927695",
"0.5915064",
"0.5900568",
"0.5884531",
"0.58663297",
"0.5865185",
"0.58637923",
"0.58486325",
"0.58378184",
"0.582674",
"0.582506",
"0.582498",
"0.58215237",
"0.5817307",
"0.58161986",
"0.5796195",
"0.5790982",
"0.5734406",
"0.5715341",
"0.56970674",
"0.5683999",
"0.56630445"
] | 0.6343704 | 1 |
Executes a forward pass through all the rotors from right to left. Returns the encrypted letter as an integer | def _forward(self, letter):
self._turn_rotors()
l = letter
for i in range(-1, -self.n_rotors - 1, -1):
l = self._rotor_right2left(self.rotors[i], l, self.offsets[i],
self.rings[i])
return l | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encryptionRotate(text):\n s = text\n transformedChar = \"\"\n transformedChar = s[-1] + s[:-1]\n\n print(\"Single Rotation Encrypted text : \" )\n return transformedChar",
"def rotate_letter(c, num):\n return chr(((ord(c) - 97) + num) % 26 + 97)",
"def incrementRotor(self):\n self._position += 1\n self._position = self._position % 26 \n \n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def encryptionMultipleRotate(text, power):\n s = text\n transformedChar = \"\"\n transformedChar = s[-power:] + s[:-(power)]\n\n print(\"Multiple Rotation Encrypted text : \" )\n return transformedChar",
"def encryptionShift(text, index):\n s=text\n transformedChar=\"\"\n transformedChar = ord(s[index]) + 1\n\n if(transformedChar > 90):\n transformedChar=chr(ord(s[index]) + 1 - 26)\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Single Shift Encrypted text: \")\n return s[:index] + transformedChar + s[index+1:]",
"def decryptionShift(text, index):\n s = text;\n transformedChar = \"\"\n transformedChar = ord(s[index]) - 1\n\n if (s[index] == 'A'):\n transformedChar = chr(ord(s[index]) - 1 + 26)\n else:\n transformedChar = chr(ord(s[index]) - 1)\n\n print(\"Single Shift Decrypted text: \" )\n return s[:index] + transformedChar + s[index+1:]",
"def brute_force_decrypt(text):\n for n in range(26):\n print(f\"Using a shift value of {n}\")\n print(decrypt(text, n))\n print(\"\\n***\\n\")",
"def _rotor_right2left(rotor, input_letter, offset, ring):\n\t\talpha_size = len(ALPHABET)\n\t\treturn (rotor[(input_letter + offset - ring) % alpha_size] - offset +\\\n\t\t\t\t\tring) % alpha_size",
"def encryptionMultipleShift(text, index, power):\n s=text\n transformedChar=\"\"\n\n transformedChar = ord(s[index]) + (power % 26)\n if (transformedChar >= 90):\n transformedChar = chr(64 + (transformedChar - 90))\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Multiple Shift Encrypted text : \" )\n return s[:index] + transformedChar + s[index+1:]",
"def decryptionRotate(text):\n s = text;\n transformedChar = \"\"\n transformedChar = s[1:] + s[0]\n\n print(\"Single Rotation Decrypted text : \" )\n return transformedChar",
"def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar",
"def rot(c,n):\n if 'a' <= c <= 'z': \n new_ord = ord(c) + n\n if new_ord > ord('z'):\n new_ord = new_ord - 26\n elif 'A' <= c <= 'Z': \n new_ord = ord(c) + n\n if new_ord > ord('Z'):\n new_ord = new_ord - 26\n else: \n new_ord = ord(c)\n return chr(new_ord)",
"def _rotor_left2right(rotor, input_letter, offset, ring):\n\t\tletter = (input_letter + offset - ring) % len(ALPHABET)\n\t\treturn (rotor.index(letter) - offset + ring) % len(ALPHABET)",
"def CipherRun():\r\n ChosenData = UserInput() # retieving user input\r\n Msg, Shift = ChosenData # assigning the message text and shift amount\r\n Cipher = \"\" # placeholder for the encrypted text\r\n\r\n for char in Msg:\r\n if char.isalpha(): # checks if the charcter is a letter\r\n code = ord(char) + Shift # shifts the ordinal position by the requested shift amount\r\n if code > ord('z') and char.islower()==True: # if the char is lowercase and shift is beyound the letter z\r\n code = ord('a') + ((code-ord('z'))-1) # wrap back around to the beginning but take account that you are starting by 1 shift\r\n elif code > ord('Z') and char.isupper() == True: # if the char is upppercase and shift is beyound the letter Z\r\n code = ord('A') + ((code -ord('Z'))-1) # wrap back around to the beginning but take account that you are starting by 1 shift\r\n Cipher += chr(code) # add the character you land on b/c of the shift to the Cipher text\r\n else:\r\n Cipher +=char # add the non letter character to the cipher text\r\n print(Cipher)",
"def decryptionMultipleShift(text, index, power):\n s = text\n transformedChar = \"\"\n\n transformedChar = ord(s[index])\n if (power > 26):\n power = power % 26\n transformedChar = chr((transformedChar - power))\n\n else:\n transformedChar = chr((transformedChar) - power)\n\n print(\"Multiple Shift Decrypted text : \" )\n return s[:index] + transformedChar + s[(index + 1):]",
"def rot(c, n):\n if 'a' <= c <= 'z':\n l = ord(c) + n\n if l > ord('z'):\n l -= 26\n return chr(l)\n elif 'A' <= c <= 'Z':\n l = ord(c) + n\n if l > ord('Z'):\n l -= 26\n return chr(l)\n else:\n return c",
"def rotate_character(char, rot):\n if type(char) != type(''):\n return char\n if type(rot) != type(1):\n return char\n \n if len(char) != 1:\n return char\n if not char.isalpha():\n return char\n letters = lowerLetters\n if char.isupper():\n letters = upperLetters\n \n pos = letters.find(char)\n pos += rot\n pos = pos % 26\n \n return letters[pos]",
"def encryptAffine(letter, a, b):\n if(gcd(7, 26) != 1):\n return \"Error, not a bijection\"\n else:\n encrypted_letter = \"\"\n for i in range(len(letter)):\n if(ord(letter[i]) == 32):\n encrypted_letter += chr(ord(letter[i]))\n else:\n let = letter[i].lower()\n let = ord(let) - 97\n new_let = (((let* a) + b) % 26) + 97\n encrypted_letter += chr(new_let)\n return encrypted_letter",
"def operate_cipher(self):",
"def test_encryption(e, c):\n message = input(\"Enter word to encrypt: \")\n ciphered = ''\n\n for i in range(0, len(message)):\n ciphered = f'{ciphered}{chr(endecrypt(ord(message[i]), e, c))}'\n\n print(ciphered + ' is the ciphered text')\n d = key_cracker(e, c)\n print(\"Plain text is:\")\n for i in range(0, len(ciphered)):\n print(chr(endecrypt(ord(ciphered[i]), d, c)), end='')",
"def rot_encode(shift, txt):\n # list of the alphabet accounting for both upper and lower case. \n # ignore symbols.\n\n alpha = list('abcdefghijklmnopqrstuvwxyz')\n Alpha=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n \n # create list to save encoded characters\n new_txt=[]\n\n for ch in txt:\n\n # check character against alphabets\n if ch in alpha:\n new_txt.append(alpha[(alpha.index(ch) + shift) % 26])\n elif ch in Alpha:\n new_txt.append(Alpha[(Alpha.index(ch) + shift) % 26])\n \n else:\n # if not a character just append the symbol\n new_txt.append(ch)\n\n # return as string\n return ''.join(new_txt)",
"def endecrypt(x, e, c):\n\n return modulo(x, e, c)",
"def adjustRotor(self):\n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26 #Loops the alphabet from Z (26th letter) back to A (1st letter)\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def rotate_left_right(self):\n\t\treturn",
"def rotate_right_left(self):\n\t\treturn",
"def encipher(S, n):\n new = ''\n for i in S:\n c = rot(i, n)\n new = new + c\n return new",
"def rotate(message):\n rot_message = ''\n for letter in range(len(message)):\n\n change = 13\n\n # ord() returns the number representing a specific character's unicode\n value = ord(message[letter])\n\n if value < 65 or (value > 90 and value < 97) or value > 122:\n rot_message += chr(value)\n\n elif value + change > 122: # 122 is the last lowercase 'z' in ASCII\n change -= (122 - value)\n change = change % 26 # 26 letters in the alphabet\n\n # 96 is the last character before 'a' in ASCII\n # chr() returns the character that represents\n # the specified unicode number\n rot_message += chr(96 + change)\n\n elif value > 64 and value < 91:\n if value + change > 90:\n rot_message += chr(value - change)\n else:\n rot_message += chr(value + change)\n\n else:\n rot_message += chr(value + change)\n\n return rot_message",
"def encript(self): \n if (len(sys.argv) == Cconfiguration_caesar.DUAL_PARAMETER) and (int(sys.argv[Cconfiguration_caesar.INCREMENTAL_PARAMETER])>=Cconfiguration_caesar.INITIAL_INT_PARAMETER):\n result = \"\"\n k = int(sys.argv[Cconfiguration_caesar.INCREMENTAL_PARAMETER])\n plaintext = input(\"plaintext: \")\n for i in range(len(plaintext)):\n char = plaintext[i]\n if ((Cconfiguration_caesar.ALPHABET_LOWER_INDEX>ord(char)) or (Cconfiguration_caesar.ALPHABET_LOWER_LIMIT<ord(char))) and ((Cconfiguration_caesar.ALPHABET_UPPER_INDEX>ord(char)) or (Cconfiguration_caesar.ALPHABET_UPPER_LIMIT<ord(char))):\n result += char\n elif (char.isupper()):\n result += chr((ord(char) + k-Cconfiguration_caesar.ALPHABET_UPPER_INDEX) % Cconfiguration_caesar.ALPHABET_LIMIT + Cconfiguration_caesar.ALPHABET_UPPER_INDEX)\n else:\n result += chr((ord(char) + k - Cconfiguration_caesar.ALPHABET_LOWER_INDEX) % Cconfiguration_caesar.ALPHABET_LIMIT + Cconfiguration_caesar.ALPHABET_LOWER_INDEX)\n print(f\"ciphertext: {result}\")\n else:\n print(CextraStatusDefinition.COMMAND_LINE_EERROR)\n exit(Cconfiguration_caesar.INCREMENTAL_PARAMETER)",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def applyCoder(text, coder):\n ciphertext = str()\n #for each letter in the text find it, and grab shifted letter\n for letter in text:\n ciphertext += coder.get(letter, letter)\n return ciphertext"
] | [
"0.66399425",
"0.6564686",
"0.65551734",
"0.6526592",
"0.6486069",
"0.6390609",
"0.63876545",
"0.63511467",
"0.63508856",
"0.6246106",
"0.6229571",
"0.618497",
"0.61506116",
"0.6106474",
"0.6098463",
"0.60835016",
"0.60799736",
"0.6076406",
"0.60617894",
"0.60591775",
"0.604564",
"0.6038704",
"0.6035852",
"0.60333234",
"0.6019405",
"0.60032135",
"0.59783965",
"0.5977343",
"0.59679985",
"0.59670544"
] | 0.6679923 | 0 |
Given the letter returned by the reflector, executes a backward pass, cyphering the input letter in all rotors from left to right. Returns the output letter as an integer | def _backwards(self, letter):
l = letter
for i in range(self.n_rotors):
l = self._rotor_left2right(self.rotors[i], l, self.offsets[i],
self.rings[i])
return l | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _forward(self, letter):\n\t\tself._turn_rotors()\n\t\tl = letter\n\t\tfor i in range(-1, -self.n_rotors - 1, -1):\n\t\t\tl = self._rotor_right2left(self.rotors[i], l, self.offsets[i],\n\t\t\t\t\t\t\t\t\tself.rings[i])\n\t\treturn l",
"def cipherFromReflector(self, char):\n inputCharNum = self.GetNumByChar(char) #Finds the index of the value for the input character to find the internal wiring connection\n outputChar = self.chrNum(inputCharNum) #Finds the ASCII code of the index value to find the external rotor connection \n return outputChar #Returns the external character that the wiring is connected to",
"def decryptionRotate(text):\n s = text;\n transformedChar = \"\"\n transformedChar = s[1:] + s[0]\n\n print(\"Single Rotation Decrypted text : \" )\n return transformedChar",
"def decryptionShift(text, index):\n s = text;\n transformedChar = \"\"\n transformedChar = ord(s[index]) - 1\n\n if (s[index] == 'A'):\n transformedChar = chr(ord(s[index]) - 1 + 26)\n else:\n transformedChar = chr(ord(s[index]) - 1)\n\n print(\"Single Shift Decrypted text: \" )\n return s[:index] + transformedChar + s[index+1:]",
"def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar",
"def rotate_letter(c, num):\n return chr(((ord(c) - 97) + num) % 26 + 97)",
"def encryptionRotate(text):\n s = text\n transformedChar = \"\"\n transformedChar = s[-1] + s[:-1]\n\n print(\"Single Rotation Encrypted text : \" )\n return transformedChar",
"def rotate_character(char, rot):\n if type(char) != type(''):\n return char\n if type(rot) != type(1):\n return char\n \n if len(char) != 1:\n return char\n if not char.isalpha():\n return char\n letters = lowerLetters\n if char.isupper():\n letters = upperLetters\n \n pos = letters.find(char)\n pos += rot\n pos = pos % 26\n \n return letters[pos]",
"def affine_decipher_letter(letter, multiplier=1, adder=0, one_based=True):\n if letter in string.ascii_letters:\n cipher_number = pos(letter)\n if one_based: cipher_number += 1\n plaintext_number = ( \n modular_division_table[multiplier, (cipher_number - adder) % 26]\n )\n if one_based: plaintext_number -= 1\n if letter in string.ascii_uppercase:\n return unpos(plaintext_number).upper()\n else:\n return unpos(plaintext_number) \n else:\n return letter",
"def cipherFromExternalContact(self, char):\n self.incrementRotor() #Incements the rotor as the key is 'pressed' on the enigma machine\n char = self.cipherToReflector(char) #Ciphers a character in the forward direction to the reflector\n return char #Returns character",
"def cipherToReflector(self, char):\n inputCharNum = self.ordChar(char) #Finds the index value of the input ASCII code to find the external rotor connection\n outputChar = self.GetCharByNum(inputCharNum) #Finds the corresponding character of the input to find the internal wiring connection\n return outputChar #Finds the internal wiring contact that the character is connected to",
"def encryptionMultipleRotate(text, power):\n s = text\n transformedChar = \"\"\n transformedChar = s[-power:] + s[:-(power)]\n\n print(\"Multiple Rotation Encrypted text : \" )\n return transformedChar",
"def decryptAffine(letter, a, b):\n decrypted_letter = \"\"\n for i in range(len(letter)):\n if(ord(letter[i]) == 32):\n decrypted_letter += chr(ord(letter[i]))\n else:\n let = letter[i].lower()\n let = ord(let) - 97\n new_letter = let - b\n inverse = multInverse(a, 26)\n final_letter = ((new_letter * inverse) % 26) + 97\n decrypted_letter += chr(final_letter)\n return decrypted_letter",
"def decryptionMultipleShift(text, index, power):\n s = text\n transformedChar = \"\"\n\n transformedChar = ord(s[index])\n if (power > 26):\n power = power % 26\n transformedChar = chr((transformedChar - power))\n\n else:\n transformedChar = chr((transformedChar) - power)\n\n print(\"Multiple Shift Decrypted text : \" )\n return s[:index] + transformedChar + s[(index + 1):]",
"def incrementRotor(self):\n self._position += 1\n self._position = self._position % 26 \n \n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def rot(c, n):\n if 'a' <= c <= 'z':\n l = ord(c) + n\n if l > ord('z'):\n l -= 26\n return chr(l)\n elif 'A' <= c <= 'Z':\n l = ord(c) + n\n if l > ord('Z'):\n l -= 26\n return chr(l)\n else:\n return c",
"def adjustRotor(self):\n #Ensures next positions are within alphabet range\n for k, v in self.alphabet.items():\n self.alphabet[k] = (v - 1)% 26 #Loops the alphabet from Z (26th letter) back to A (1st letter)\n \n #Swaps dictionary format from {‘Key’: value} to {value: ‘Key’} so we can change the characters\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet\n \n #Changes internal wiring contact to its previous contact\n for k, v in self.alphabet.items():\n charNum = self.ordChar(v)\n prevCharNum = (charNum - 1)% 26\n prevChar = self.chrNum(prevCharNum)\n self.alphabet[k] = prevChar\n \n #Swaps dictionary format from {value: ‘Key’} back to original {‘Key’: value} format\n tempAlphabet = {}\n for k, v in self.alphabet.items():\n tempAlphabet[v] = k\n self.alphabet = tempAlphabet",
"def rot(c,n):\n if 'a' <= c <= 'z': \n new_ord = ord(c) + n\n if new_ord > ord('z'):\n new_ord = new_ord - 26\n elif 'A' <= c <= 'Z': \n new_ord = ord(c) + n\n if new_ord > ord('Z'):\n new_ord = new_ord - 26\n else: \n new_ord = ord(c)\n return chr(new_ord)",
"def turn_clockwise(a):\r\n if a==\"N\":\r\n return \"E\"\r\n elif a==\"E\":\r\n return \"S\"\r\n elif a==\"S\":\r\n return \"W\"\r\n elif a==\"W\":\r\n return \"N\"",
"def _rotor_left2right(rotor, input_letter, offset, ring):\n\t\tletter = (input_letter + offset - ring) % len(ALPHABET)\n\t\treturn (rotor.index(letter) - offset + ring) % len(ALPHABET)",
"def rotate_character(char, rot):\n if char.isalpha() == False:\n return char\n else:\n original_pos = alphabet_position(char)\n new_pos = original_pos + rot\n\n if new_pos >=26:\n new_pos = new_pos % 26\n\n alphabet='abcdefghijklmnopqrstuvwxyz'\n ALPHABET='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n \n if char.isupper() == True:\n new_char = ALPHABET[new_pos]\n else:\n new_char = alphabet[new_pos]\n \n return new_char",
"def prev_letter(letter, step):\r\n\r\n\tif letter in ascii_uppercase:\r\n\t\tnew_letter = get_new_letter(ascii_uppercase, letter, -step)\r\n\telif letter in ascii_lowercase:\r\n\t\tnew_letter = get_new_letter(ascii_lowercase, letter, -step)\r\n\telse:\r\n\t\tnew_letter = letter\r\n\treturn new_letter",
"def decrypt(text, shift):\n decrypted_text = list(range(len(text)))\n alphabet = string.ascii_lowercase\n first_half = alphabet[:shift]\n second_half = alphabet[shift:]\n shifted_alphabet = second_half + first_half\n\n for i, letter in enumerate(text.lower()):\n\n if letter in alphabet:\n index = shifted_alphabet.index(letter)\n original_letter = alphabet[index]\n decrypted_text[i] = original_letter\n else:\n decrypted_text[i] = letter\n\n return \"\".join(decrypted_text)",
"def decrypt(phrase, offset):\n return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters",
"def _get_rot_letter(self, letter, rot, operation, alphabet):\n if len(letter) != 1:\n raise ValueError(\"'letter' deve ter length 1.\")\n\n if letter not in alphabet:\n letter_new = letter\n\n else:\n letter_pos = alphabet.index(letter)\n letter_new = alphabet[operation(letter_pos, rot) % len(alphabet)]\n \n return letter_new",
"def backward_character():\r\n set_point(point().offset(-1))",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def encryptionShift(text, index):\n s=text\n transformedChar=\"\"\n transformedChar = ord(s[index]) + 1\n\n if(transformedChar > 90):\n transformedChar=chr(ord(s[index]) + 1 - 26)\n else:\n transformedChar = chr(transformedChar)\n\n print(\"Single Shift Encrypted text: \")\n return s[:index] + transformedChar + s[index+1:]",
"def decryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[:index] + s[-1] + s[index:len(s)-1]\n\n print(\"Decrypted Transformed text : \" )\n return transformedChar",
"def applyCoder(text, coder):\n ciphertext = str()\n #for each letter in the text find it, and grab shifted letter\n for letter in text:\n ciphertext += coder.get(letter, letter)\n return ciphertext"
] | [
"0.6735222",
"0.6486761",
"0.6389943",
"0.6337723",
"0.6333011",
"0.6282108",
"0.6205114",
"0.61938417",
"0.6165998",
"0.61477476",
"0.6132617",
"0.6115616",
"0.6112447",
"0.60500765",
"0.60370743",
"0.60039794",
"0.5961587",
"0.59385735",
"0.5931577",
"0.5929782",
"0.59091127",
"0.58977544",
"0.588406",
"0.5872559",
"0.58423793",
"0.5825653",
"0.582488",
"0.58078015",
"0.58033824",
"0.5797805"
] | 0.66574705 | 1 |
Decrypts text. The configuration should be the same as the initial configuration used by the machine for encryption. Use the reset method to reset the offsets if necessary. | def decrypt(self, text):
if self.offsets != self.start_off:
raise Exception("Current offset != starting offset. Use the reset"+\
" method before decrypting.")
return self.encrypt(text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decrypt(text, offset):\r\n return format_text(text, -offset)",
"def decrypt(self, text):\n return self.encrypt(text)",
"def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted",
"def decrypt(text, address, path):\n client = ConfigClient(address=address, fail_fast=False)\n cipher = re.match(r\"^.?{cipher}?(?P<name>\\w.*)\", text)\n if cipher:\n text = cipher.group(\"name\")\n\n try:\n resp = client.decrypt(text, path=path)\n except Exception:\n raise click.ClickException(\"💥 Failed to contact server!\")\n\n table = Table.grid(padding=(0, 1))\n table.add_column(style=\"cyan\", justify=\"right\")\n table.add_column(style=\"magenta\")\n\n table.add_row(\"decrypted data[yellow]:[/yellow] \", f\"'{resp}'\")\n console.print(Panel(table, border_style=\"yellow\", expand=True))",
"def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv",
"def decrypt(text, offset):\n decrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n decrypted_character = chr(ord(char))\n elif ord(char) <= 90:\n decrypted_character = ord(char) - offset\n if decrypted_character < 65:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n else:\n decrypted_character = ord(char) - offset\n if decrypted_character < 97:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n decrypted_text += decrypted_character\n\n return decrypted_text",
"def decrypt(self, encText, previouslyProcessedData=None):\n if previouslyProcessedData is None:\n length = len(self.oldDecrypt)\n if length % BLOCK_SIZE == 0:\n previouslyProcessedData = length\n else:\n previouslyProcessedData = int(\n BLOCK_SIZE * math.floor(length / BLOCK_SIZE)\n )\n\n # previouslyProcessedData was passed by the parent: it means that a frame was decoded and there was some data left. This does not include the padding zeros\n if previouslyProcessedData % BLOCK_SIZE != 0:\n previouslyProcessedData = int(\n BLOCK_SIZE * math.ceil(previouslyProcessedData / BLOCK_SIZE)\n )\n\n remainingData = self.oldDecrypt[previouslyProcessedData:]\n if self.oldDecrypt != b\"\":\n self.decryptIV = self.oldDecrypt[\n previouslyProcessedData - BLOCK_SIZE : previouslyProcessedData\n ]\n\n self.oldDecrypt = encText # save current block\n\n toDecrypt = truncate_multiple(remainingData + encText, BLOCK_SIZE)\n decryptor = RijndaelCbc(\n self.key,\n self.decryptIV,\n padding=ZeroPadding(BLOCK_SIZE),\n block_size=BLOCK_SIZE,\n )\n return decryptor.decrypt(toDecrypt)",
"def decipher(ciphered_text: str, key: int, charset: str = DEFAULT_CHARSET) -> str:\n deciphered_text = _offset_text(ciphered_text, key, False, Ciphers.CAESAR, charset)\n return deciphered_text",
"def FtDecrypt(self,EncryptText):\n \n self.EncryptText = EncryptText\n characters = \"abcdefghijklmnopqrstuvwxyz \"\n DecripText = ''\n\n #attempt to decrypt the text using the made_key and EncryptText \n try:\n for item in self.EncryptText:\n DecripText += Code_Fouad_Teniou.my_dict[item]\n\n return DecripText\n \n #Raise KeyError if a different key was used to encrypt the text \n except KeyError:\n print \"\\n<Please use the right code(made_key) to decrypt your text\"",
"def decrypt(self, string):\n return self.__Cipher(self.OP_DECRYPT).update(string)",
"def decrypt(self, ciphertext: str) -> str:\n\n return self.run(ciphertext, Cryptography.DECRYPT)",
"def decrypt(self, text):\n\n decrypted_word = []\n for letter in text:\n try:\n index = self.alpha.index(letter)\n except ValueError:\n decrypted_word.append(letter)\n else:\n # Uses Affine decryption function to decrypt the word\n new_index = ((21*(index-self.b)) % self.m)\n decrypted_word.append(self.alpha[new_index])\n return \"\".join(decrypted_word)",
"def decrypt(self, enc, use_base64=True, decode_text=True):\n if use_base64:\n enc = base64.b64decode(enc)\n\n decryptor = self.cipher.decryptor()\n raw = self._unpad(decryptor.update(enc) + decryptor.finalize())\n return raw.decode(\"utf-8\") if decode_text else raw",
"def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)",
"def decrypt(self, data):",
"def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)",
"def decrypt(phrase, offset):\n return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters",
"def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)",
"def decrypt_string(self, encrypted_string):\n return self.fernet_instance.decrypt(encrypted_string.encode('utf-8')).decode('utf-8')",
"def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")",
"def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')",
"def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)",
"def decrypt(self, ciphertext):\n\n # Note that the state of the cipher is updated by each operation,\n # and the offset into the stream is implicit, which means that\n # it is almost always an error to use the encrypt and decrypt\n # methods of the same instance, so we do a simple check to ensure\n # that this isn't the case.\n #\n if self.prev_crypto_op and self.prev_crypto_op != self.decrypt:\n raise RuntimeError('Same instance used for encrypt/decrypt')\n self.prev_crypto_op = self.decrypt\n\n return self.rc4.update(ciphertext)",
"def decrypt(self, text):\n\n output = []\n text = text.upper()\n\n for char in text:\n try:\n index = self.alpha.index(char)\n except ValueError:\n output.append(char)\n else:\n output.append(self.alpha[21 * (index - 8) % 26])\n return \"\".join(output)",
"def _decrypt_update(\r\n self,\r\n cipher_text: int,\r\n ) -> Update:\r\n # Ensure byte alignment of 16 because of CBC mode\r\n byte_length = math.ceil(math.log(cipher_text, 2) / 8 / 16) * 16\r\n cipher_text_bytes: bytes = int.to_bytes(cipher_text, byteorder='big', length=byte_length)\r\n update_str: str = decrypt(self.k, cipher_text_bytes)\r\n (t, op, ind, w) = update_str.split(',')\r\n return int(t), Op(int(op)), int(ind), w",
"def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)",
"def weaksauce_decrypt(text, password):\n offset = sum([ord(x) for x in password])\n decoded = ''.join(\n chr(max(ord(x) - offset, 0))\n for x in text\n )\n return decoded",
"def _decrypt_string(self, event):\n _LOGGER.debug(\"Hub: Decrypt String: Original: %s\", str(event.encrypted_content))\n resmsg = self._decrypter.decrypt(unhexlify(event.encrypted_content)).decode(\n encoding=\"UTF-8\", errors=\"replace\"\n )\n _LOGGER.debug(\"Hub: Decrypt String: Decrypted: %s\", resmsg)\n event.parse_decrypted(resmsg)",
"def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved",
"def decrypt(text: str, key: str = None):\n if not text.isdecimal():\n raise ValueError(\"Encrypted text must contain only numbers.\")\n tmpres = []\n lkey = []\n if key is not None:\n lkey = list(key.encode(\"utf-8\"))\n i = 0\n counter = 0\n while i < len(text):\n l = int(text[i])\n tmp = text[i + 1:i + l + 1]\n i += l + 1\n if not tmp:\n break\n if lkey:\n c = int(tmp) - lkey[counter % len(lkey)]\n else:\n pm = 1 if tmp[0] == \"0\" else -1\n ri = int(tmp[1]) * pm\n c = int(tmp[2:]) - ri\n tmpres.append(c)\n counter += 1\n return bytes(tmpres).decode(\"utf8\")"
] | [
"0.7697106",
"0.7482718",
"0.7030502",
"0.6725311",
"0.66860723",
"0.6614009",
"0.65919447",
"0.6583637",
"0.65643156",
"0.65093535",
"0.64935434",
"0.6470546",
"0.6464969",
"0.64432144",
"0.6407435",
"0.64009774",
"0.63371176",
"0.6336156",
"0.63166344",
"0.63100886",
"0.625975",
"0.619401",
"0.61878586",
"0.6158671",
"0.61550784",
"0.6114354",
"0.610665",
"0.60788536",
"0.6067246",
"0.6044019"
] | 0.8522153 | 0 |
FindFile(file) Attempts to locate file in any of the mods folders. If file is a full path, it will attempt to use the GetFile() function to split the folder and file from the full path. Returns a tuple of (folder, file) in either case. If the file is not a full path and can't be found, it will raise FileNotFoundError, giving the file as argument. | def FindFile(seeker):
for folder in var.MOD_LOCATION:
for file in os.listdir(folder):
if file.lower() == seeker.lower():
if not folder.endswith(("/", "\\")):
folder = folder + "\\"
return folder, file
if True in [slash in seeker for slash in ("/", "\\")]:
return GetFile(seeker) # Full path
raise FileNotFoundError(seeker) # Exit out if the mod could not be found | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def recursively_find_file(folder, file_name):\n # TODO: print a hint when not founding file_name",
"def checkFilePath(self, filename, searchpath=[]):\n\t\tif filename is None:\n\t\t\treturn None\n\t\telif os.path.isfile(filename):\n\t\t\treturn filename\n\t\telse:\n\t\t\t# Append current dir to searchpath and try each in turn\n\t\t\tsearchpath.append(os.path.dirname(__file__))\n\t\t\t# print(searchpath)\n\t\t\tfor folder in searchpath:\n\t\t\t\tfilepath = os.path.join(folder, filename)\n\t\t\t\tif os.path.isfile(filepath):\n\t\t\t\t\treturn filepath\n\n\t\t# File not found\n\t\treturn None",
"def find_file(file_path=None, args=None, locations=DEFAULT_LOCATIONS,\n file_name='weewx.conf'):\n\n # Start by searching args (if available)\n if file_path is None and args:\n for i in range(len(args)):\n if not args[i].startswith('-'):\n file_path = args[i]\n del args[i]\n break\n\n if file_path is None:\n for directory in locations:\n # If this is a relative path, then prepend with the\n # directory this file is in:\n if not directory.startswith('/'):\n directory = os.path.join(os.path.dirname(__file__), directory)\n candidate = os.path.abspath(os.path.join(directory, file_name))\n if os.path.isfile(candidate):\n return candidate\n\n if file_path is None:\n raise IOError(\"Unable to find file '%s'. Tried directories %s\" %\n (file_name, locations))\n elif not os.path.isfile(file_path):\n raise IOError(\"%s is not a file\" % file_path)\n\n return file_path",
"def find_file(self, filename, pathlist = ['.']):\n if filename.startswith('http://') or filename.startswith('https://'):\n return (urlopen(filename), filename)\n for path in [''] + pathlist:\n filepath = abspath(path + '/' + filename)\n if isfile(filepath):\n f = open(filepath, 'r')\n return (f, filepath)\n raise FileNotFoundError(filename, pathlist)",
"def check_file(file: Path):\n if Path(file).is_file() or file == \"\":\n return file\n else:\n files = glob.glob(\"./**/\" + file, recursive=True) # find file\n FILE_NOT_FOUND_MSG = f\"File Not Found: {file}\"\n MULTIPLE_FILE_MSG = f\"Multiple files match '{file}', specify exact path:{files}\"\n\n assert len(files), FILE_NOT_FOUND_MSG # assert file was found\n assert len(files) == 1, MULTIPLE_FILE_MSG # assert unique\n return files[0] # return file",
"def search_file(filename, search_path, pathsep=os.pathsep):\n for path in string.split(search_path, pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): return os.path.abspath(candidate)\n return None",
"def search_file(filename, search_path, pathsep=os.pathsep):\n for path in string.split(search_path, pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): return os.path.abspath(candidate)\n return None",
"def _find_file(config, startswith=False):\n remote_files = _get_remote_files(config)\n if startswith:\n remote_folders = {}\n for fname, (pid, _) in remote_files.items():\n remote_folders[os.path.dirname(fname)] = (pid, None)\n remote_files = remote_folders\n\n def glob_match(f1, f2):\n \"\"\"Check for wildcard glob style matches.\n \"\"\"\n if f1.find(\"*\") >= 0:\n if fnmatch.fnmatch(f2, \"*/%s\" % f1):\n return True\n\n def get_file(f):\n if _is_remote(f):\n f = _get_id_fname(f)[-1]\n # handle both bare lookups and project-prefixed\n if f.find(\":\") > 0:\n fproject, f = f.split(\":\")\n else:\n fproject = None\n # check for exact matches\n for project, folder in _remote_folders(config):\n if fproject is None or fproject == project:\n folder_f = os.path.join(folder, f)\n if folder_f in remote_files:\n pid, fid = remote_files[folder_f]\n return \"%s:%s/%s:%s\" % (KEY, fid, pid, folder_f)\n # find any files nested in sub folders or as globs\n out = []\n for project, folder in _remote_folders(config):\n for rfname, (pid, rid) in remote_files.items():\n if rfname.startswith(folder + \"/\") and (rfname.endswith(\"/\" + f) or glob_match(f, rfname)):\n out.append(\"%s:%s/%s:%s\" % (KEY, rid, pid, rfname))\n if len(out) == 1:\n return out[0]\n elif len(out) > 1:\n return out\n return get_file",
"def find_file_match(folder, end):\n if not os.path.exists(folder):\n return None\n if not os.path.isdir(folder):\n return None\n\n dir_list = os.listdir(folder)\n subdirs = []\n\n # First try to find the file. Save any sub folders for later\n for entry in dir_list:\n # Skip over hidden files\n if entry[0] == '.':\n continue\n\n # Check the name to see if it's a file and if it first the descrioption\n test_path = os.path.join(folder, entry)\n if os.path.isfile(test_path):\n if test_path.endswith(end):\n return test_path\n else:\n subdirs.append(entry)\n\n # Loop through sub folders\n subdirs_len = len(subdirs)\n if subdirs_len > 0:\n for one_dir in subdirs:\n found = find_file_match(os.path.join(folder, one_dir), end)\n if not found is None:\n return found\n\n return None",
"def find(name):\n\n if os.path.exists(name):\n return name\n\n path = os.path.dirname(__file__) or '.'\n filename = os.path.abspath(os.path.join(path,name))\n if os.path.exists(filename):\n return filename\n\n for d in os.listdir(path):\n fullpath = os.path.abspath(os.path.join(path,d))\n if os.path.isdir(fullpath):\n filename = os.path.abspath(os.path.join(fullpath,name))\n if os.path.exists(filename):\n return filename\n return None",
"def search_file(file_name, search_path):\n if os.path.exists(search_path):\n filenames = os.listdir(search_path)\n for filename in filenames:\n if file_name in filenames:\n return os.path.join(search_path, file_name)\n return None",
"def find_file_in_folders(filename, folders):\n for folder in folders:\n folder = os.path.expanduser(folder)\n full_path = os.path.join(folder, filename)\n if os.path.exists(full_path):\n return full_path\n raise FileNotFoundError(\"Did not find file %s!\" % filename)",
"def search_file(filename, search_path):\n file_path = None\n for path in search_path:\n if exists(join(path, filename)):\n file_path = path\n break\n if file_path:\n return abspath(join(file_path, filename))\n return None",
"def get_file_by_name(mod_files: List[PatchedFile], filename: str):\n for mod in mod_files:\n if mod.path == filename:\n return mod\n return None",
"def findFoamFile(foamFile, foamCase='.'):\n # check first if the path is absolute or relative. \n if len(foamFile.split('/'))==1: \n foamFiles = getFoamFiles(foamCase)\n foamFilePath = [elem for elem in foamFiles if elem.endswith('/' + foamFile)]\n if len(foamFilePath)==0:\n print('foamFile ' + foamFile + ' could not be found.')\n foamFilePath =''\n else:\n foamFilePath = foamFilePath[0]\n else:\n foamFilePath = foamFile\n return foamFilePath",
"def _find_files(directory, dirs_to_look_in, files_to_search_for, \n current_dir, see_files):\n full_name = True\n if see_files:\n full_name = False\n files_to_load = search_directory(directory, \n look_in=dirs_to_look_in,\n search_for=files_to_search_for,\n file_type='files',\n current_dir=current_dir,\n full_name=full_name)\n if not files_to_load:\n raise UserWarning('No files were found matching the search for %s'\\\n ' in the directory(s) %s%s' \\\n % (files_to_search_for, directory, \n dirs_to_look_in))\n return files_to_load",
"def find_file(path):\n return NotImplemented",
"def find_file(filename: str, paths: List[str] = None, extensions=None) -> str:\n\n extensions = conf.SEARCH_EXTENSIONS if not extensions else extensions\n paths = conf.SEARCH_DIRS if not paths else paths\n\n if '' not in extensions:\n extensions += ['']\n\n if os.path.isabs(filename):\n with open(filename, 'r'):\n return filename\n\n for ext in extensions:\n _fn = [f'{filename}{ext}']\n if '/' in filename:\n _fn = list(filename.split('/'))\n _fn[-1] = f'{_fn[-1]}{ext}'\n\n for p in paths:\n fpath = join(p, *_fn)\n try:\n with open(fpath, 'r'):\n return fpath\n except FileNotFoundError:\n continue\n\n raise FileNotFoundError(f'File \"{filename}\" could not be found in any of the given paths.')",
"def GetFile(file):\n\n file = file.replace(\"/\", \"\\\\\").strip(\"\\\\\")\n new = list(file)\n new.reverse()\n if \"\\\\\" not in new:\n return None, file # Don't raise an error, but there isn't any folder\n indx = new.index(\"\\\\\")\n return file[:-indx], file[-indx:] # Full path and file name",
"def find_file(line, column, *, cwd=None):\n cwd = cwd or pathlib.Path()\n path = None\n for finder in finders:\n path, lineno = finder(line, column, cwd)\n if path is not None:\n break\n\n if path is None:\n return None, None\n else:\n return path, lineno",
"def find_file(filesystem, dirs, filename):\r\n for directory in dirs:\r\n filepath = path(directory) / filename\r\n if filesystem.exists(filepath):\r\n return filepath\r\n raise ResourceNotFoundError(u\"Could not find {0}\".format(filename))",
"def find_file(filename):\n for i in list(_ctx.include_paths) + [ os.path.dirname(_ctx.filename) ]:\n full_path = os.path.join(i, filename)\n if os.path.exists(full_path):\n return full_path\n return filename # failure gets handled later on",
"def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None",
"def _FindPathSpec(self, path, filename):\n # TODO: determine why this first find is used add comment or remove.\n # It does not appear to help with making sure path segment separate\n # is correct.\n find_spec = file_system_searcher.FindSpec(\n location=path, case_sensitive=False)\n path_specs = list(self._searcher.Find(find_specs=[find_spec]))\n\n if not path_specs or len(path_specs) != 1:\n raise IOError(\n u'Unable to find directory: {0:s}'.format(path))\n\n relative_path = self._searcher.GetRelativePath(path_specs[0])\n if not relative_path:\n raise IOError(u'Unable to determine relative path of: {0:s}'.format(path))\n\n # The path is split in segments to make it path segement separator\n # independent (and thus platform independent).\n path_segments = self._searcher.SplitPath(relative_path)\n path_segments.append(filename)\n\n find_spec = file_system_searcher.FindSpec(\n location=path_segments, case_sensitive=False)\n path_specs = list(self._searcher.Find(find_specs=[find_spec]))\n\n if not path_specs:\n raise IOError(\n u'Unable to find file: {0:s} in directory: {1:s}'.format(\n filename, relative_path))\n\n if len(path_specs) != 1:\n raise IOError((\n u'Find for file: {0:s} in directory: {1:s} returned {2:d} '\n u'results.').format(filename, relative_path, len(path_specs)))\n\n if not relative_path:\n raise IOError(\n u'Missing file: {0:s} in directory: {1:s}'.format(\n filename, relative_path))\n\n return path_specs[0]",
"def find_library(file):\r\n search_paths = [pathlib.Path(\"/usr/local/lib/x86_64-linux-gnu\"),\r\n pathlib.Path(\"/lib/x86_64-linux-gnu\"),\r\n pathlib.Path(\"/usr/lib/x86_64-linux-gnu\"),\r\n pathlib.Path(\"/usr/local/lib64\"),\r\n pathlib.Path(\"/lib64\"),\r\n pathlib.Path(\"/usr/lib64\"),\r\n pathlib.Path(\"/usr/local/lib\"),\r\n pathlib.Path(\"/lib\"),\r\n pathlib.Path(\"/usr/lib\"),\r\n pathlib.Path(\"/usr/x86_64-linux-gnu/lib64\"),\r\n pathlib.Path(\"/usr/x86_64-linux-gnu/lib\")]\r\n\r\n for path in search_paths:\r\n full = path.joinpath(file)\r\n if full.is_file():\r\n return str(full)\r\n return None",
"def _get_files(path, file, modality):\n p = Path(path)\n res = [p/o for o in file if not o.startswith('.') and is_mods(o, modality)]\n assert len(res)==len(modality) #TODO: Assert message\n return res",
"def _find(self, path):\n try:\n self.log.debug(\"Searching for '%s'\" % path)\n os.stat(path)\n self.log.debug(\" found!\")\n return path\n except OSError:\n self.log.debug(\" not found!\")\n segs = path.split(os.path.sep)\n found_parent = os.path.sep\n assert segs[0] == '' # expect a leading /\n for i in range(1, len(segs)): # start after leading /\n try:\n parent = os.path.sep.join(segs[:i+1])\n self.log.debug(\" searching parent %d %s\" % (i, str(parent)))\n os.stat(parent)\n self.log.debug(\" found\")\n found_parent = parent\n except OSError:\n self.log.debug(\" NOT found\")\n break\n\n # does the found_parent dir contain a differently-cased version of the requested path?\n candidates = [f for f in os.listdir(found_parent) if f.lower() == segs[i].lower()]\n self.log.debug(' Candidates: %s' % str(candidates))\n if candidates:\n if len(candidates) > 1:\n self.log.warn('Case ambiguity: %s%s{%s}' % (found_parent, os.path.sep, ','.join(candidates)))\n segs[i] = candidates[0]\n path = os.path.sep.join(segs)\n if i < (len(segs)-1):\n self.log.debug('recursing')\n path = self._find(path) # recursively search with the new case-corrected path segment\n\n self.log.debug('resolved to [or failed with] path: %s' % path)\n\n # returns path unmodified if we were unable to find case-corrected candidates.\n # expects underlying command implementations to handle file-not-found correctly if so.\n return path",
"def find_directory_with_a_file(\n filename: str,\n cwd: Optional[Union[str, Path]] = None) -> Optional[Path]:\n if cwd is None:\n curr_dir = Path(os.getcwd()).absolute()\n else:\n curr_dir = Path(cwd).absolute()\n\n pathname = curr_dir / filename\n if pathname.exists():\n return curr_dir\n\n for work_dir in curr_dir.parents:\n pathname = work_dir / filename\n if pathname.exists():\n return work_dir\n\n return None",
"def find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.full_doc_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches",
"def _find_file(self, name, check_dir='c_files'):\n testdir = os.path.dirname(__file__)\n name = os.path.join(testdir, check_dir, name)\n return name"
] | [
"0.63258296",
"0.61334497",
"0.5949965",
"0.5717448",
"0.57132846",
"0.56921214",
"0.56921214",
"0.5606734",
"0.55925655",
"0.5592373",
"0.5583398",
"0.5573338",
"0.5568939",
"0.55592126",
"0.5536514",
"0.55215186",
"0.5459383",
"0.5438104",
"0.53975683",
"0.53717285",
"0.53693366",
"0.5368109",
"0.5361428",
"0.53423816",
"0.5302034",
"0.52885276",
"0.5284287",
"0.52820075",
"0.52761555",
"0.5253943"
] | 0.7348206 | 0 |
ExecuteFile(file, params) Runs an executable file located in (one of) the Mods locations. Returns the process' return code. | def ExecuteFile(*args): # the docstring lies about parameters
folder, file = FindFile(args[0])
params = args[1:]
log.logger("PARS_EXEC_FILE", format=[file, folder[:-1], params], display=False)
process = subprocess.Popen([folder + file] + list(params))
process.communicate()
return process.returncode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n print script_path\n execfile(script_path, globals_)",
"def run_execute_file(file_path, globals=None, locals=None):\n if globals is None:\n globals = {}\n globals.update({\n \"__file__\": file_path,\n \"__name__\": \"__main__\",\n })\n with open(file_path, 'rb') as file:\n exec(compile(file.read(), file_path, 'exec'), globals, locals)",
"def LaunchFile(*params):\n\n file = subprocess.Popen(params)\n file.communicate()\n return file.returncode",
"def execute(file_path):\n os.startfile(file_path)",
"def _run_file(file_path, globals_):\n script_name = os.path.basename(file_path)\n\n sys.path = (_PATHS.script_paths(script_name) +\n _PATHS.scrub_path(script_name, sys.path))\n\n fix_google_path()\n\n execfile(_PATHS.script_file(script_name), globals_)",
"def do_exec(self, arg):\n self.run_file(arg['path'])",
"def ExecuteFile(python_program, main_filename, args, env, module_space,\n coverage_entrypoint, workspace):\n # type: (str, str, list[str], dict[str, str], str, str|None, str|None) -> ...\n # We want to use os.execv instead of subprocess.call, which causes\n # problems with signal passing (making it difficult to kill\n # Bazel). However, these conditions force us to run via\n # subprocess.call instead:\n #\n # - On Windows, os.execv doesn't handle arguments with spaces\n # correctly, and it actually starts a subprocess just like\n # subprocess.call.\n # - When running in a workspace (i.e., if we're running from a zip),\n # we need to clean up the workspace after the process finishes so\n # control must return here.\n # - If we may need to emit a host config warning after execution, we\n # can't execv because we need control to return here. This only\n # happens for targets built in the host config.\n # - For coverage targets, at least coveragepy requires running in\n # two invocations, which also requires control to return here.\n #\n if not (IsWindows() or workspace or coverage_entrypoint):\n _RunExecv(python_program, main_filename, args, env)\n\n if coverage_entrypoint is not None:\n ret_code = _RunForCoverage(python_program, main_filename, args, env,\n coverage_entrypoint, workspace)\n else:\n ret_code = subprocess.call(\n [python_program, main_filename] + args,\n env=env,\n cwd=workspace\n )\n\n if workspace:\n shutil.rmtree(os.path.dirname(module_space), True)\n sys.exit(ret_code)",
"def run_file(filename, logfile=None, execdir=None):\n if not runpy_available: #pragma:nocover\n raise pyutilib.common.ConfigurationError(\"Cannot apply the run_file() function because runpy is not available\") \n #\n # Open logfile\n #\n if not logfile is None:\n sys.stderr.flush()\n sys.stdout.flush()\n save_stdout = sys.stdout\n save_stderr = sys.stderr\n OUTPUT=open(logfile,\"w\")\n sys.stdout=OUTPUT\n sys.stderr=OUTPUT\n #\n # Add the file directory to the system path\n #\n if '/' in filename:\n tmp= \"/\".join((filename).split(\"/\")[:-1])\n tmp_import = (filename).split(\"/\")[-1]\n sys.path.append(tmp)\n elif '\\\\' in filename:\n tmp = \"\\\\\".join((filename).split(\"\\\\\")[:-1])\n tmp_import = (filename).split(\"\\\\\")[-1]\n sys.path.append(tmp)\n else:\n tmp_import = filename\n name = \".\".join((tmp_import).split(\".\")[:-1])\n #\n # Run the module\n #\n try:\n if not execdir is None:\n tmp=os.getcwd()\n os.chdir(execdir)\n tmp_path = sys.path\n sys.path = [execdir] + sys.path\n runpy.run_module(name,None,\"__main__\")\n if not execdir is None:\n os.chdir(tmp)\n sys.path = tmp_path\n except Exception: #pragma:nocover\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr\n raise\n #\n # Close logfile\n #\n if not logfile is None:\n OUTPUT.close()\n sys.stdout = save_stdout\n sys.stderr = save_stderr",
"def execfile_(filepath: str, _globals: Any) -> None:\n with open(filepath, 'rb') as stream:\n source = stream.read()\n\n code = compile(source, filepath, 'exec')\n exec(code, _globals)",
"def execute_file(filename):\n try:\n f = open(filename, 'r')\n except IOError, err:\n print \"Could not open file\", filename\n return\n blotish._set_interactive(False)\n exit_flag = False\n for line in f:\n line = line.rstrip()\n exit_flag = execute(line)\n if exit_flag: break\n if interpreter._error_flag: break\n blotish._set_interactive(True)\n return exit_flag",
"def run_file(self, fpath):\n with open(fpath, \"r\", encoding=\"utf-8\") as fin:\n return self.run_commands(fin.read())",
"def execfile(self, filename) -> str:\n\n debug(f\"pyboard execfile({filename})\")\n with open(filename, \"rb\") as f:\n pyfile = f.read()\n return self.exec_(pyfile)",
"def execute(\n file: str,\n args: Sequence[str],\n cwd: Optional[Union[Path, str]] = None,\n env: Optional[dict] = None,\n capture: bool = False,\n verbose: Optional[bool] = None,\n) -> CompletedProcess:\n\n if env is None:\n env = os.environ.copy()\n\n if is_verbose(verbose):\n log.ok(f\"run: {' '.join(shlex.quote(arg) for arg in args)}\")\n\n if capture:\n return _execute_and_capture(file, args, cwd, env, verbose)\n else:\n return _execute(file, args, cwd, env)",
"def exec_file(filename, globals=None, locals=None):\n if globals is None:\n globals = {}\n if locals is None:\n locals = globals\n locals['__file__'] = filename\n from py import path\n from _pytest import config\n from _pytest.assertion import rewrite\n f = path.local(filename)\n config = config._prepareconfig([], [])\n source_stat, code = rewrite._rewrite_test(config, f)\n logger.debug('filename: {} source_stat: {} code: {}'.format(filename, source_stat, code))\n exec(code, globals, locals)",
"def exec_file(path: str, global_vars: Dict[str, Any]) -> None:\n with open(path) as file:\n exec(compile(file.read(), path, \"exec\"), global_vars) # pylint: disable=exec-used",
"def execute_file(self, files, **kw):\n\n mode = kw['mode'] if 'mode' in kw else 0\n\n # ranger can act as a file chooser when running with --choosefile=...\n if mode == 0 and 'label' not in kw:\n if ranger.args.choosefile:\n open(ranger.args.choosefile, 'w').write(self.fm.thisfile.path)\n\n if ranger.args.choosefiles:\n paths = []\n for hist in self.fm.thistab.history:\n for fobj in hist.files:\n if fobj.marked and fobj.path not in paths:\n paths += [fobj.path]\n paths += [f.path for f in self.fm.thistab.get_selection() if f.path not in paths]\n\n with open(ranger.args.choosefiles, 'w') as fobj:\n fobj.write('\\n'.join(paths) + '\\n')\n\n if ranger.args.choosefile or ranger.args.choosefiles:\n raise SystemExit\n\n if isinstance(files, set):\n files = list(files)\n elif not isinstance(files, (list, tuple)):\n files = [files]\n\n flags = kw.get('flags', '')\n if 'c' in squash_flags(flags):\n files = [self.fm.thisfile]\n\n self.signal_emit('execute.before', keywords=kw)\n filenames = [f.path for f in files]\n label = kw.get('label', kw.get('app', None))\n try:\n return self.rifle.execute(filenames, mode, label, flags, None)\n finally:\n self.signal_emit('execute.after')",
"def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))",
"def execute_file (self, program):\n with open (program, \"r\") as stream:\n self.execute (stream.read ())\n return self.context",
"def execute(filepath, method):\n cmd = ['./iengine', method, filepath]\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n return proc.communicate()",
"def run_file(filepath: str, strict: bool = False, debug: bool = False):\n _, extension = os.path.splitext(filepath)\n try:\n return _ext_fn_map[extension](filepath, strict=strict, debug=debug)\n except KeyError:\n raise ValueError(\"There was an error running the file (invalid file extension).\")",
"def do_file(self, filename):\n with open(filename, \"r\") as infile:\n self._run_cmd(infile.read())",
"def execute(self, args=\"\"):\r\n return super(PythonScript, self).execute(_EXECUTABLE, args)",
"def runFile(self, f, name=None):\n return self.run(f.read(), name)",
"def run_scratch_file(file_name, settings):\n return subprocess.call([file_name])",
"def execution(self, config: Config,\n cwd: Path, file: str, arguments: List[str]) -> Command:\n raise NotImplementedError",
"def execute_script_from_file(self, filename):\n filename = os.path.join(self.curr_dir, filename)\n # Connect to db\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with open(filename, \"r\", encoding=\"utf-8\") as sql_file:\n sql_script = sql_file.read()\n\n # all SQL commands (split on ';')\n sql_commands = filter(None, sql_script.split(\";\"))\n # Execute every command from the input file\n for command in sql_commands:\n # This will skip and report errors\n # For example, if the tables do not yet exist, this will skip over\n # the DROP TABLE commands\n try:\n cursor.execute(command)\n except OperationalError as msg:\n print(\"Command skipped: \", msg)\n conn.commit()\n conn.close()",
"def run(filename):\n try:\n with open(filename) as f:\n interp.runcode(f.read())\n except IOError as e:\n self.perror(e)",
"def modExec(module):\n modName = module.split('_')[-1]\n if \"live\" in module:\n dn = '{0} (live)'.format(modName.upper())\n else:\n dn = '{0}'.format(modName.upper())\n\n try:\n modStart = datetime.utcnow()\n log.info(\"Running {0}\".format(dn))\n modImport = 'modules.' + module\n\n import_module(modImport)\n\n modOutput = [i for i in glob.glob(outputdir + '/*') if all(p in i for p in [modName, runID])]\n try:\n arch = [archive.add_file(os.path.basename(outfile)) for outfile in modOutput]\n except IndexError:\n pass\n\n modEnd = datetime.utcnow()\n modRuntime = modEnd - modStart\n log.debug(\"{0} finished in {1}.\".format(dn, modRuntime))\n\n except KeyboardInterrupt:\n sys.stdout.write('\\r')\n sys.stdout.flush()\n log.error(\"{0} was killed. \".format(module))\n\n except Exception:\n log.error(\"{0} failed: {1}\".format(module, [traceback.format_exc()]))",
"def run_python_file(python, file_args, directives=None):\n args = []\n if directives:\n for directive in directives:\n args.extend(('-X', directive))\n args.extend(file_args)\n command = (\n \"import Cython.Build.BuildExecutable as bex; \"\n \"bex.DEBUG = False; \"\n \"bex.build_and_run({args!r})\"\n ).format(args=args)\n run_python(python, command)",
"def main():\n if os.path.isdir(path):\n for filename in os.listdir(path):\n if filename.endswith('.asm'):\n execute_asm_file(path + '/' + filename, filename)\n else:\n execute_asm_file(path, path[path.rfind(\"/\") + 1:])"
] | [
"0.698795",
"0.69709724",
"0.6680755",
"0.6648183",
"0.6581526",
"0.64158064",
"0.6392396",
"0.62397593",
"0.6199557",
"0.60507655",
"0.601007",
"0.5984262",
"0.5943254",
"0.5921672",
"0.59054035",
"0.58133644",
"0.5799649",
"0.57672405",
"0.5734077",
"0.5714152",
"0.5699595",
"0.56478083",
"0.5592738",
"0.55458224",
"0.549969",
"0.5431632",
"0.5419596",
"0.5408868",
"0.5400709",
"0.53792185"
] | 0.7974937 | 0 |
GetFile(file) Splits the folder and file from a full path. Returns a tuple of (folder, file). | def GetFile(file):
file = file.replace("/", "\\").strip("\\")
new = list(file)
new.reverse()
if "\\" not in new:
return None, file # Don't raise an error, but there isn't any folder
indx = new.index("\\")
return file[:-indx], file[-indx:] # Full path and file name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_file_path(file_path):\n\n file_name = os.path.basename(file_path)\n\n cutoff = len(file_path) - len(file_name)\n\n folder_path = file_path[:cutoff]\n\n return folder_path, file_name",
"def parse_file_path(file_path):\n\n file_name = os.path.basename(file_path)\n\n cutoff = len(file_path) - len(file_name)\n\n folder_path = file_path[:cutoff]\n\n return folder_path, file_name",
"def spilt_path(unclean_path_to_file):\n if os.path.exists(unclean_path_to_file) == True:\n return os.path.split(unclean_path_to_file)\n else:\n return None, unclean_path_to_file",
"def split(self, f):\n x = os.path.split(f)\n subjectid = os.path.split(x[-2])[-1]\n imagefile = x[-1]\n return (subjectid, imagefile)",
"def get_file(view: sublime.View, string: str, name: str) -> 'Tuple[str, Optional[str]]':\n\n # if it's an absolute path get it\n if osp.isabs(string):\n return string, None\n\n # if search_mode: \"project\", search only in project\n elif Settings.search_mode == \"project\":\n # Get base project folders\n base_folders = sublime.active_window().folders()\n # if \"recursive\": true, recursively search for the name\n if Settings.recursive:\n ch_rec = check_recursive(base_folders, name)\n if ch_rec:\n base_folder, root = ch_rec\n return osp.join(root, name), base_folder\n return \"\", None\n else:\n # search only in base folders for the relative path\n for base_folder in base_folders:\n file_name = osp.normpath(osp.join(base_folder, string))\n if osp.exists(file_name):\n return file_name, base_folder\n return \"\", None\n # if search_mode: \"file\" join the relative path to the file path\n else:\n return osp.normpath(osp.join(osp.dirname(view.file_name()), string)), None",
"def file_parts(file_path):\n\n base_path, tail = os.path.split(file_path)\n name, ext = os.path.splitext(tail)\n\n return base_path, name, ext",
"def get_relative_path (folder, file) :\n if not os.path.exists (folder) : raise PQHException (folder + \" does not exist.\")\n if not os.path.exists (file) : raise PQHException (file + \" does not exist.\")\n sd = folder.replace(\"\\\\\",\"/\").split(\"/\")\n sf = file.replace(\"\\\\\",\"/\").split(\"/\")\n for i in range (0, len (sd)) :\n if i >= len (sf) : break\n elif sf [i] != sd [i] : break\n res = copy.copy (sd)\n j = i\n while i < len (sd) :\n i += 1\n res.append (\"..\")\n res.extend (sf [j:])\n return os.path.join (*res)",
"def seperate_file(file):\n firstHalf = file.split(\"\\\\\"[-1])\n #print \"This is the node\", firstHalf[-2]\n node = firstHalf[-2]\n print \"\\nReading results for \", node\n return node",
"def get_file_path(cls, file_name, folder_name):\n return cls.file_path.parent / folder_name / file_name",
"def get_file(_file):\n _file = pathlib.Path(_file)\n if not _file.is_file():\n _file = None\n return _file",
"def get_file_from_path(file_path):\n return Utils.get_real_file_path(file_path)",
"def split_leading_directory(file_path):\n\tdelim = '/'\n\tpath_split = file_path.split(delim)\n\tif len(path_split) > 0:\n\t\tlead_dir = path_split[0]\n\telse:\n\t\tlead_dir = ''\n\tif len(path_split) > 1:\n\t\trest = delim.join(path_split[1:])\n\telse:\n\t\trest = ''\n\treturn lead_dir, rest",
"def result_file(file_path: str) -> Union[str, None]:\n if not os.path.isdir(file_path):\n return None\n else:\n file_list = list()\n for file in os.listdir(file_path):\n file_list.append(file)\n if not file_list or len(file_list) > 1:\n # it should be just one file per file_id directory\n return None\n else:\n return file_list[0]",
"def filename_split(path):\n\tdirectory = os.path.dirname(path)\n\tfilename, extension = os.path.splitext(os.path.basename(path))\n\treturn directory, filename, extension",
"def parse_folder(file_folder: str) -> Tuple[list, list, list]:\n\n raw_files = [\n _\n for _ in os.listdir(file_folder)\n if _.lower().endswith(\".raw\") or _.lower().endswith(\".d\") or _.lower().endswith(\".mzml\")\n ]\n fasta_files = [_ for _ in os.listdir(file_folder) if _.lower().endswith(\".fasta\")]\n db_files = [\n _ for _ in os.listdir(file_folder) if _.lower().endswith(\".db_data.hdf\")\n ]\n\n return raw_files, fasta_files, db_files",
"def split_file_name(file_path):\n file_name = os.path.splitext(file_path)[0]\n file_name = os.path.split(file_name)[1]\n\n return file_name",
"def parse_file_path(file_path):\n base = Path(file_path)\n return str(base.parents[0]), str(base.stem), str(base.suffix)",
"def get_file(view: View, string: str, name: str):\n\n # if it's an absolute path get it\n if osp.isabs(string):\n return string, None\n\n # if search_mode: \"project\", search only in project\n elif SEARCH_MODE == \"project\":\n # Get base project folders\n base_folders = sublime.active_window().folders()\n # if \"recursive\": true, recursively search for the name\n if RECURSIVE:\n ch_rec = check_recursive(base_folders, name)\n if ch_rec:\n base_folder, root = ch_rec\n return osp.join(root, name), base_folder\n return \"\", None\n else:\n # search only in base folders for the relative path\n for base_folder in base_folders:\n file_name = osp.normpath(osp.join(base_folder, string))\n if osp.exists(file_name):\n return file_name, base_folder\n return \"\", None\n # if search_mode: \"file\" join the relative path to the file path\n else:\n return osp.normpath(osp.join(osp.dirname(view.file_name()), string)), None",
"def split3 (filename):\n directory, basename = os.path.split (filename)\n basename, extension = os.path.splitext (basename)\n return directory, basename, extension",
"def read_foldername(folder):\n match = re.search(r'(\\d+)\\s-\\s(\\d\\d\\d\\d)-(\\d\\d)\\s-\\s(.+).*\\s-\\s(.+)\\s--\\s(.+)', folder)\n if match is not None:\n foldertuple = match.groups()\n else:\n match = re.search(r'(\\d+)\\s-\\s(\\d\\d\\d\\d)-(\\d\\d)\\s-\\s()(.*)\\s--\\s(.+)', folder)\n if match is not None:\n foldertuple = match.groups()\n else:\n match = re.search(r'(\\d+)\\s-\\s(\\d\\d\\d\\d)-(\\d\\d)()()()', folder)\n if match is not None:\n foldertuple = match.groups()\n else:\n foldertuple = None\n return foldertuple",
"def get_fld_fl(file):\n if '_rels' in file: # slides/_rels/slide2.xml.rels\n sp = file.split('/')\n fl_name = sp[-1]\n fld_name = f'{sp[0]}/{sp[1]}'\n elif '../' in file:\n _,fld_name,fl_name = file.split('/')\n else:\n fld_name,fl_name = file.split('/')\n \n return fld_name, fl_name",
"def __parse_full_path(path):\n dir = path[:path.rfind('/') + 1]\n name = path[path.rfind('/') + 1:]\n return dir, name",
"def getFileDir(filepath):\n return os.path.dirname(filepath)",
"def check_file(file: Path):\n if Path(file).is_file() or file == \"\":\n return file\n else:\n files = glob.glob(\"./**/\" + file, recursive=True) # find file\n FILE_NOT_FOUND_MSG = f\"File Not Found: {file}\"\n MULTIPLE_FILE_MSG = f\"Multiple files match '{file}', specify exact path:{files}\"\n\n assert len(files), FILE_NOT_FOUND_MSG # assert file was found\n assert len(files) == 1, MULTIPLE_FILE_MSG # assert unique\n return files[0] # return file",
"def get_file_path(filename):\n if 'http' in filename:\n parsed_uri = urlparse(filename)\n f = '/' + parsed_uri.path[1:]\n f = '/'.join(f.split('/')[3:]) # split the xxx dir, remove the leading /\n else:\n filename = ('/' + filename) if filename[0] != '/' else filename # make sure starts with /\n # split local img path from path\n f = filename.replace(settings.FILE_PATH, '/')\n f = f.replace(settings.IMAGE_PATH, '/')\n f = f.replace(settings.DERIVED_PATH, '/')\n f = '/'.join(f.split('/')[2:]) # split the xxx dir, remove the leading /\n\n return f",
"def get_path(f=sys.argv[0]):\n\n return os.path.split(f)",
"def split_path(path:str):\n if path is None or len(path) == 0:\n return '', '', ''\n path = sanitize_path(path)\n folder, filename = os.path.split(path)\n ext = ''\n if '.' in filename:\n filename, ext = os.path.splitext(filename)\n # handle double ext, like 'mode.pth.tar'\n filename, ext2 = os.path.splitext(filename)\n ext = ext2 + ext\n else:\n folder = os.path.join(folder, filename)\n filename = ''\n return folder, filename, ext",
"def file_directory(file):\n return os.path.dirname(os.path.realpath(file))",
"def ReturnPathOfFile(self, url):\n\tcount=0\n\turlComponent = urlparse.urlparse(url)\n\tfor part in urlComponent:\n\t count = count + 1\n\t if count == 3:\n\t\tFolderPath = part\n\treturn FolderPath",
"def get_file(self, key):\n result = (None, None)\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n content_type, _ = mimetypes.guess_type(path)\n with open(path, \"rb\") as file:\n result = content_type, file.read()\n return result"
] | [
"0.6762138",
"0.6762138",
"0.60875225",
"0.60354054",
"0.6009469",
"0.59130657",
"0.5769223",
"0.56438655",
"0.56382656",
"0.56349343",
"0.5631418",
"0.56203586",
"0.5578579",
"0.5524074",
"0.55204046",
"0.5500459",
"0.5481047",
"0.5479951",
"0.546729",
"0.5453095",
"0.5443306",
"0.5433338",
"0.54009086",
"0.5381109",
"0.53629786",
"0.53469837",
"0.5345579",
"0.53102946",
"0.5295959",
"0.52880096"
] | 0.7454542 | 0 |
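The GetFile record above describes splitting a Windows-style path into a (folder, file) pair. A minimal self-contained sketch of that behaviour, assuming the illustrative name split_folder_file (the helper and its example paths are not taken from the record):

def split_folder_file(path: str):
    # Normalise separators and drop any trailing backslash, as the record does.
    path = path.replace("/", "\\").strip("\\")
    folder, sep, name = path.rpartition("\\")
    if not sep:
        # No separator left: there is no folder component.
        return None, name
    # Keep the trailing separator on the folder, matching the record's output.
    return folder + "\\", name

if __name__ == "__main__":
    print(split_folder_file("C:/games/mods/archive.rar"))  # ('C:\\games\\mods\\', 'archive.rar')
    print(split_folder_file("archive.rar"))                # (None, 'archive.rar')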
ExtractFile(file, dst=None, pw=None) Extracts an archive into the temp folder. Specify a file, a destination and a password. If 'file' is not an archive, it will simply copy it over. If 'dst' is not specified, it will use the file's name. Returns the location of the resulting files. | def ExtractFile(file, dst=None, pw=None):
path, file = FindFile(file)
if file.endswith(".rar"):
type = "rar"
elif file.endswith((".zip", ".7z")):
type = "zip"
else:
type = None
if dst is None:
dst = file
if not dst.endswith(("/", "\\")):
dst = dst + "\\"
if not path.endswith(("/", "\\")):
path = path + "\\"
if pw is None:
pw = "none"
if type == "rar": # Rar file
subprocess.Popen([var.RAR_LOCATION, "x", "-y", "-p" + pw, path+file, var.BOOTLEG_TEMP + dst]).wait()
elif type == "zip": # Zip file
subprocess.Popen([var.SEVENZ_LOCATION, "x", "-p" + pw, "-y", "-o" + var.BOOTLEG_TEMP + dst, path + file]).wait()
else: # No type, just copy it over
shutil.copy(path + file, var.BOOTLEG_TEMP + dst + file)
log.logger("PARS_EXTR_FILE", format=[path + file], display=False)
return var.BOOTLEG_TEMP + dst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extractfile(file, passwd):\n try:\n zipf = zipfile.ZipFile(file)\n zipf.extractall(path=os.path.join(file[:-4]), pwd=str.encode(passwd))\n print('Password: {}'.format(passwd))\n except:\n pass",
"def ExtractFile(self, dest_dir):\n self.__get_packed_xwalk_app_template(dest_dir)\n file_path = os.path.join(dest_dir, self.file_name)\n tar = tarfile.open(file_path, 'r:gz')\n tar.extractall(dest_dir)\n tar.close()\n file_path = os.path.join(dest_dir, self.file_name)\n if os.path.isfile(file_path):\n os.remove(file_path)",
"def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))",
"def SshExtractZip(host, zipname, dst):\n command = ['ssh', host, 'unzip', '-o', '-d', dst, zipname]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh unzip -o -d \"%s\" \"%s\" on \"%s\" (%s)' %\n (dst, zipname, host, result))\n\n # unzip will create directories with access 700, which is not often what we\n # need. Fix the permissions for the whole archive.\n command = ['ssh', host, 'chmod', '-R', '755', dst]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh chmod -R 755 \"%s\" on \"%s\" (%s)' %\n (dst, host, result))",
"def extract(file, fileFormat):\n\tspeech.speak(\"Extracting files in \" + file + \".\")\n\tpatoolib.extract_archive(file)",
"def _extract_zip(src, dst):\n # check if src is a valid .zip\n assert zipfile.is_zipfile(src), \"{} is not a valid .zip file.\".format(src)\n\n zip_file = zipfile.ZipFile(src, \"r\")\n for file in zip_file.namelist():\n zip_file.extract(file, dst)",
"def extract_via_patoolib(\r\n file_path: str, unpack_path: str = None, remove_if_exists: bool = False\r\n) -> Optional[str]:\r\n # TODO handle compression with -zvxf\r\n if not os.path.exists(file_path):\r\n log.warning(file_path + \" does not exist.\")\r\n return None\r\n\r\n try:\r\n import patoolib\r\n except ImportError:\r\n log.warning(\"patoolib is not installed: Run pip install patool\")\r\n return None\r\n\r\n if not unpack_path:\r\n unpack_path = os.path.join(\r\n os.path.dirname(file_path), os.path.splitext(os.path.basename(file_path))[0]\r\n )\r\n\r\n if os.path.isdir(unpack_path):\r\n log.info(\"Unpack directory already exists \" + unpack_path)\r\n if not os.listdir(unpack_path):\r\n log.info(\"Directory is empty. Unpacking...\")\r\n elif remove_if_exists:\r\n log.info(\"Removing existing unpacked dir: \" + unpack_path)\r\n shutil.rmtree(unpack_path)\r\n else:\r\n return unpack_path\r\n\r\n try:\r\n patoolib.extract_archive(file_path, outdir=unpack_path)\r\n except Exception as e:\r\n log.warning(\"Failed to unpack via patoolib: \", exc_info=e)\r\n return None\r\n\r\n return unpack_path",
"def _zipfile_single_file_extract_worker(\n zip_file_path: Path,\n file_in_archive: zipfile.ZipInfo,\n destination_folder: Path,\n is_dir: bool,\n) -> Path:\n with _FastZipFileReader(zip_file_path) as zf:\n # assemble destination and ensure it exits\n destination_path = destination_folder / file_in_archive.filename\n\n if is_dir:\n destination_path.mkdir(parents=True, exist_ok=True)\n return destination_path\n desc = f\"decompressing {zip_file_path}:{file_in_archive.filename} -> {destination_path}\\n\"\n with zf.open(name=file_in_archive) as zip_fp, destination_path.open(\n \"wb\"\n ) as dest_fp, tqdm_logging_redirect(\n total=file_in_archive.file_size,\n desc=desc,\n **(\n _TQDM_FILE_OPTIONS\n | dict(miniters=_compute_tqdm_miniters(file_in_archive.file_size))\n ),\n ) as pbar:\n while chunk := zip_fp.read(_CHUNK_SIZE):\n dest_fp.write(chunk)\n pbar.update(len(chunk))\n return destination_path",
"def ExtractZip(filename, output_dir, verbose=True):\n MaybeMakeDirectory(output_dir)\n\n # On Linux and Mac, we use the unzip command as it will\n # handle links and file bits (executable), which is much\n # easier then trying to do that with ZipInfo options.\n #\n # On Windows, try to use 7z if it is installed, otherwise fall back to python\n # zip module and pray we don't have files larger than 512MB to unzip.\n unzip_cmd = None\n if IsLinux():\n unzip_cmd = ['unzip', '-o']\n elif IsMac():\n # The Mac version of unzip does not have LARGE_FILE_SUPPORT until\n # macOS 10.12, so use ditto instead. The Python ZipFile fallback\n # used on Windows does not support symbolic links, which makes it\n # unsuitable for Mac builds.\n unzip_cmd = ['ditto', '-x', '-k']\n elif IsWindows() and os.path.exists('C:\\\\Program Files\\\\7-Zip\\\\7z.exe'):\n unzip_cmd = ['C:\\\\Program Files\\\\7-Zip\\\\7z.exe', 'x', '-y']\n\n if unzip_cmd:\n # Make sure path is absolute before changing directories.\n filepath = os.path.abspath(filename)\n saved_dir = os.getcwd()\n os.chdir(output_dir)\n command = unzip_cmd + [filepath]\n # When using ditto, a destination is required.\n if command[0] == 'ditto':\n command += ['.']\n result = RunCommand(command)\n os.chdir(saved_dir)\n if result:\n raise ExternalError('unzip failed: %s => %s' % (str(command), result))\n else:\n assert IsWindows()\n zf = zipfile.ZipFile(filename)\n # TODO(hinoka): This can be multiprocessed.\n for name in zf.namelist():\n if verbose:\n print 'Extracting %s' % name\n zf.extract(name, output_dir)\n if IsMac():\n # Restore permission bits.\n os.chmod(os.path.join(output_dir, name),\n zf.getinfo(name).external_attr >> 16L)",
"def extract_zip(file, extract_location):\n\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(extract_location)\n\n print(f\"Extracted file to {extract_location}\")",
"def extract(self, step_name, archive_file, output, mode='safe',\n include_files=()):\n assert mode in ('safe', 'unsafe'), 'Unknown mode %r' % (mode,)\n\n step_result = self.m.python(\n step_name,\n self.resource('extract.py'),\n [\n '--json-input', self.m.json.input({\n 'output': str(output),\n 'archive_file': str(archive_file),\n 'safe_mode': mode == 'safe',\n 'include_files': list(include_files),\n }),\n '--json-output', self.m.json.output(),\n ],\n step_test_data=lambda: self.m.json.test_api.output({\n 'extracted': {\n 'filecount': 1337,\n 'bytes': 0xbadc0ffee,\n },\n }))\n self.m.path.mock_add_paths(output)\n j = step_result.json.output\n if j.get('extracted', {}).get('filecount'):\n stat = j['extracted']\n step_result.presentation.step_text += (\n '<br/>extracted %s files - %.02f MB' % (\n stat['filecount'], stat['bytes'] / (1000.0**2)))\n if j.get('skipped', {}).get('filecount'):\n stat = j['skipped']\n step_result.presentation.step_text += (\n '<br/>SKIPPED %s files - %.02f MB' % (\n stat['filecount'], stat['bytes'] / (1000.0**2)))\n step_result.presentation.logs['skipped files'] = stat['names']\n step_result.presentation.status = self.m.step.FAILURE\n ex = self.m.step.StepFailure(step_name)\n ex.archive_skipped_files = stat['names']\n raise ex",
"def extract_all(fn,dst=\".\"):\r\n if tarfile.is_tarfile(fn): \r\n with tarfile.open(fn,'r') as tf:\r\n tf.extractall(dst)\r\n tf.close()\r\n elif zipfile.is_zipfile(fn):\r\n with zipfile.ZipFile(fn, 'r') as zf:\r\n zf.extractall(dst)\r\n zf.close()\r\n else:\r\n print( \"Please provide a tar archive file or zip file\" )",
"def unzipfile(filename, passcode):\n # Password is SHA-256 hash of the pass code received\n password = hashlib.sha256(passcode.encode('utf-8')).hexdigest()\n # Unzip with password\n with ZipFile(filename) as zf:\n zf.extractall(pwd=bytes(password, 'utf-8'))",
"def unpack(filename: Union[str, Path], extract_to: Union[str, Path]) -> None:\n raise NotImplemented",
"def maybe_extract(filename):\n ext = path.splitext(filename)[1]\n if ext not in EXTRACTORS.keys():\n return None\n # Append the full filepath to the tempdir\n tempdir_root = tempfile.mkdtemp()\n tempdir = path.join(tempdir_root, filename.lstrip('/'))\n os.makedirs(tempdir)\n EXTRACTORS[ext](filename, tempdir)\n rchmod(tempdir_root)\n return tempdir_root",
"def _extract(self, file_path):\n self._log.debug(\"Extracting file {!r}\".format(\n os.path.basename(file_path)\n ))\n tar = tarfile.open(file_path, \"r:gz\")\n tar.extractall(os.path.dirname(file_path))\n tar.close()",
"def unpack(file_path, extraction_path, remove):\n print(file_path)\n Archive(file_path).extractall(extraction_path, auto_create_dir=True)\n # remove original compressed file???\n if remove is True:\n os.remove(file_path)",
"def extract_file(self, filename):\n unp_bin = os.path.join(self.modulebin, 'unp')\n\n filepath = os.path.dirname(filename)\n uncompressed = ['fasta', 'fa', 'fastq', 'fq', 'fna', 'h5' ]\n supported = ['tar.gz', 'tar.bz2', 'bz2', 'gz', 'lz',\n 'rar', 'tar', 'tgz','zip']\n for ext in uncompressed:\n if filename.endswith('.'+ext):\n return filename\n for ext in supported:\n if filename.endswith('.'+ext):\n extracted_file = filename[:filename.index(ext)-1]\n if os.path.exists(extracted_file): # Check extracted already\n return extracted_file\n logger.info(\"Extracting {}...\".format(filename))\n # p = subprocess.Popen([unp_bin, filename],\n # cwd=filepath, stderr=subprocess.STDOUT)\n # p.wait()\n # Hide the \"broken pipe\" message from unp\n out = subprocess.Popen([unp_bin, filename],\n cwd=filepath,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()[0]\n if os.path.exists(extracted_file):\n return extracted_file\n else:\n logger.error(\"Extraction of {} failed: {}\".format(filename, out))\n raise Exception('Archive structure error')\n logger.error(\"Could not extract {}\".format(filename))\n return filename",
"def _extract_file(dest_path, root_dir):\n logger.info(\"Unzipping the dataset file.\")\n with zipfile.ZipFile(dest_path, \"r\") as zip_dir:\n zip_dir.extractall(root_dir)",
"def unzip(zipped_file, output_directory=None,\n prefix=\"apsharvest_unzip_\", suffix=\"\"):\n if not output_directory:\n # We create a temporary directory to extract our stuff in\n try:\n output_directory = mkdtemp(suffix=suffix,\n prefix=prefix,\n dir=os.path.join(CFG_TMPSHAREDDIR, 'apsharvest'))\n except Exception, e:\n try:\n os.removedirs(output_directory)\n except TypeError:\n pass\n raise e\n return _do_unzip(zipped_file, output_directory)",
"def extract_zip_contents(zip_file, destination):\n logging.info(\"Extracting ZIP File\")\n if os.path.isfile(zip_file):\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(destination)\n else:\n logging.error(\"%s not found.\", zip_file)\n sys.exit(\"ZIP is not the filesystem.\")",
"def extract_file(f, seekpt, size, dest_path):\n f.seek(seekpt)\n # Clear the file in case there are permission problems\n subprocess.call(['rm', '-f', dest_path])\n with open(dest_path, 'wb') as out:\n while size:\n bytes=min(size, 8192)\n out.write(f.read(bytes))\n size -= bytes",
"def ExtractMod(mod, dst=None, pw=None, range=None, overwrite=True):\n\n file = getattr(fl, mod)\n if dst is None:\n dst = var.BOOTLEG_TEMP + mod\n\n try:\n if range is None:\n FindFile(file)\n else: # explicit override of the range function - yay __builtins__ :D\n for num in __builtins__.range(*range):\n FindFile(file.format(num))\n log.logger(\"PARS_INSTALLING\", format=[mod])\n ExtractFile(file, None, pw)\n CopyFolder(var.BOOTLEG_TEMP + file, dst, overwrite)\n log.logger(\"PARS_COMPLETED\", format=[mod])\n except FileNotFoundError:\n CallSkipMod(file)\n return dst",
"def copy_file(filename, dst):\n # Create dir if needed\n dir_path = os.path.dirname(os.path.expanduser(dst))\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n src = os.path.join(get_data(''), filename)\n dst = os.path.expanduser(dir_path)\n shutil.copy2(src, dst)",
"def unpack_archive(\n filepath: types.PathLike, *, extract_dir: Optional[types.PathLike] = None\n) -> types.PathLike:\n filepath = utils.to_path(filepath).resolve()\n if not extract_dir:\n extract_dir = str(filepath.parent)\n filepath = str(filepath)\n os.makedirs(extract_dir, exist_ok=True)\n is_zipfile = zipfile.is_zipfile(filepath)\n is_tarfile = tarfile.is_tarfile(filepath)\n if not is_zipfile and not is_tarfile:\n LOGGER.debug(\"'%s' is not an archive\", filepath)\n return extract_dir\n else:\n LOGGER.info(\"extracting data from archive file '%s'\", filepath)\n shutil.unpack_archive(filepath, extract_dir=extract_dir, format=None)\n # we want to rename the unpacked directory to a consistent value\n # unfortunately, shutil doesn't pass this back to us\n # so, we get the root path of all the constituent members\n if is_zipfile:\n with zipfile.ZipFile(filepath, mode=\"r\") as zf:\n members = zf.namelist()\n else:\n with tarfile.open(filepath, mode=\"r\") as tf:\n members = tf.getnames()\n src_basename = os.path.commonpath(members)\n dest_basename = os.path.basename(filepath)\n if src_basename:\n while True:\n tmp, _ = os.path.splitext(dest_basename)\n if tmp == dest_basename:\n break\n else:\n dest_basename = tmp\n if src_basename != dest_basename:\n return shutil.move(\n os.path.join(extract_dir, src_basename),\n os.path.join(extract_dir, dest_basename),\n )\n else:\n return os.path.join(extract_dir, src_basename)\n else:\n return extract_dir",
"def extract_file(self):\n shutil.unpack_archive(os.path.join(self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))",
"def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination",
"def _extract_to_temp(filename):\n temp_rule_dir = util.get_tmpdir()\n status = subprocess.call(\"tar zxf %s -C %s\" % (\n filename, temp_rule_dir), shell=True)\n return temp_rule_dir",
"def extract_file(self):\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), f\"{self.root}\")\n os.remove(os.path.join(self.root, self.resources))",
"def _unzip_file(zip_file_path: str, unzip_dir: str = \"\") -> None:\n if not unzip_dir:\n unzip_dir = os.path.dirname(zip_file_path)\n op_desc = f\"Extracting: {os.path.basename(zip_file_path)}\"\n try:\n with ZipFile(file=zip_file_path) as zip_file:\n for member_name in tqdm(zip_file.namelist(), desc=op_desc):\n file_name = os.path.basename(member_name)\n if not file_name:\n continue\n target_path = os.path.join(unzip_dir, file_name)\n target_path = open(target_path, \"wb\")\n source_file = zip_file.open(member_name)\n with source_file, target_path:\n shutil.copyfileobj(source_file, target_path)\n os.remove(zip_file_path)\n except Exception as zip_error:\n zip_file_str = os.path.basename(zip_file_path)\n zip_file_str = os.path.splitext(zip_file_str)[0]\n for file_name in os.listdir(unzip_dir):\n if zip_file_str in file_name:\n os.remove(os.path.join(unzip_dir, file_name))\n raise zip_error"
] | [
"0.65115666",
"0.5753972",
"0.56500095",
"0.5603992",
"0.5527236",
"0.5507701",
"0.5500255",
"0.54809153",
"0.5445222",
"0.53783196",
"0.53759146",
"0.5262364",
"0.5165045",
"0.51642126",
"0.51533556",
"0.5140736",
"0.5131286",
"0.5126001",
"0.508101",
"0.5036791",
"0.5029284",
"0.5022193",
"0.50142807",
"0.5006986",
"0.4973985",
"0.49730596",
"0.49456292",
"0.49356204",
"0.49260786",
"0.4911447"
] | 0.773322 | 0 |
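The ExtractFile record dispatches to external rar/7z binaries by extension and falls back to a plain copy. A standard-library-only sketch of the same extract-or-copy pattern, assuming the illustrative name extract_or_copy; .rar and .7z handling is omitted because zipfile only covers .zip:

import os
import shutil
import zipfile
from typing import Optional

def extract_or_copy(src: str, dst_dir: str, pw: Optional[str] = None) -> str:
    # Ensure the destination exists, then extract or copy into it.
    os.makedirs(dst_dir, exist_ok=True)
    if zipfile.is_zipfile(src):
        with zipfile.ZipFile(src) as zf:
            # zipfile expects the password as bytes.
            zf.extractall(path=dst_dir, pwd=pw.encode() if pw else None)
    else:
        # Not a zip archive: simply copy the file over, as the record does.
        shutil.copy(src, os.path.join(dst_dir, os.path.basename(src)))
    return dst_dir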
ExtractMod(mod, dst=None, pw=None, range=None, overwrite=True) Checks for a mod's existence and installs it if it exists. 'dst' will be the final location. Defaults to the temp folder if unspecified. 'pw' will be fed as the password to the ExtractFile function. 'range' is the range of files to check; it's a two-tuple unpacked into range(). 'overwrite' determines whether it should overwrite existing files if filenames clash. Returns the destination. | def ExtractMod(mod, dst=None, pw=None, range=None, overwrite=True):
file = getattr(fl, mod)
if dst is None:
dst = var.BOOTLEG_TEMP + mod
try:
if range is None:
FindFile(file)
else: # explicit override of the range function - yay __builtins__ :D
for num in __builtins__.range(*range):
FindFile(file.format(num))
log.logger("PARS_INSTALLING", format=[mod])
ExtractFile(file, None, pw)
CopyFolder(var.BOOTLEG_TEMP + file, dst, overwrite)
log.logger("PARS_COMPLETED", format=[mod])
except FileNotFoundError:
CallSkipMod(file)
return dst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ExtractFile(file, dst=None, pw=None):\n\n path, file = FindFile(file)\n\n if file.endswith(\".rar\"):\n type = \"rar\"\n elif file.endswith((\".zip\", \".7z\")):\n type = \"zip\"\n else:\n type = None\n\n if dst is None:\n dst = file\n if not dst.endswith((\"/\", \"\\\\\")):\n dst = dst + \"\\\\\"\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n\n if pw is None:\n pw = \"none\"\n\n if type == \"rar\": # Rar file\n subprocess.Popen([var.RAR_LOCATION, \"x\", \"-y\", \"-p\" + pw, path+file, var.BOOTLEG_TEMP + dst]).wait()\n elif type == \"zip\": # Zip file\n subprocess.Popen([var.SEVENZ_LOCATION, \"x\", \"-p\" + pw, \"-y\", \"-o\" + var.BOOTLEG_TEMP + dst, path + file]).wait()\n else: # No type, just copy it over\n shutil.copy(path + file, var.BOOTLEG_TEMP + dst + file)\n\n log.logger(\"PARS_EXTR_FILE\", format=[path + file], display=False)\n return var.BOOTLEG_TEMP + dst",
"async def extract(self, destination, entries=None, callback=None):\n # :param srcdestpairs: A list of 2-tuples where the first item\n # is the source path within the archive of a file to install,\n # and the second item is the path (relative to the mod\n # installation directory) where the source should be extracted.\n\n # TODO: ignore \"._\"-prefixed mac-cruft files\n loop = asyncio.get_event_loop()\n c=0\n # noinspection PyTypeChecker\n async for extracted in self.archiver.extract(\n archive=self.archive,\n destination=destination,\n specific_entries=entries,\n # callback=callback\n ):\n c+=1\n if callback:\n loop.call_soon_threadsafe(callback, extracted, c)\n self.LOGGER << f\"{c} files extracted\"\n\n # srcdestpairs = srcdestpairs,",
"def maybe_extract(filename):\n ext = path.splitext(filename)[1]\n if ext not in EXTRACTORS.keys():\n return None\n # Append the full filepath to the tempdir\n tempdir_root = tempfile.mkdtemp()\n tempdir = path.join(tempdir_root, filename.lstrip('/'))\n os.makedirs(tempdir)\n EXTRACTORS[ext](filename, tempdir)\n rchmod(tempdir_root)\n return tempdir_root",
"def extract_via_patoolib(\r\n file_path: str, unpack_path: str = None, remove_if_exists: bool = False\r\n) -> Optional[str]:\r\n # TODO handle compression with -zvxf\r\n if not os.path.exists(file_path):\r\n log.warning(file_path + \" does not exist.\")\r\n return None\r\n\r\n try:\r\n import patoolib\r\n except ImportError:\r\n log.warning(\"patoolib is not installed: Run pip install patool\")\r\n return None\r\n\r\n if not unpack_path:\r\n unpack_path = os.path.join(\r\n os.path.dirname(file_path), os.path.splitext(os.path.basename(file_path))[0]\r\n )\r\n\r\n if os.path.isdir(unpack_path):\r\n log.info(\"Unpack directory already exists \" + unpack_path)\r\n if not os.listdir(unpack_path):\r\n log.info(\"Directory is empty. Unpacking...\")\r\n elif remove_if_exists:\r\n log.info(\"Removing existing unpacked dir: \" + unpack_path)\r\n shutil.rmtree(unpack_path)\r\n else:\r\n return unpack_path\r\n\r\n try:\r\n patoolib.extract_archive(file_path, outdir=unpack_path)\r\n except Exception as e:\r\n log.warning(\"Failed to unpack via patoolib: \", exc_info=e)\r\n return None\r\n\r\n return unpack_path",
"def save_mod_instance(self, mod):\r\n modentry, created = ModEntry.get_or_create(name=mod[\"name\"], service=self.service)\r\n\r\n # Optional entries\r\n for field in [\"version\", \"description\", \"filehash\", \"filesize\", \"homepage\", \"author\", \"category\", \"filename\", \"magnet\", \"torrent\"]:\r\n if mod.get(field):\r\n setattr(modentry, field, mod[field])\r\n \r\n modentry.save()\r\n \r\n return modentry, created",
"def _extract_zip(src, dst):\n # check if src is a valid .zip\n assert zipfile.is_zipfile(src), \"{} is not a valid .zip file.\".format(src)\n\n zip_file = zipfile.ZipFile(src, \"r\")\n for file in zip_file.namelist():\n zip_file.extract(file, dst)",
"def extract_patch(opts, patch_start, patch_length, ucode_file, ucode_level):\n cwd = os.getcwd()\n\n if not os.path.exists(opts.extract):\n os.makedirs(opts.extract)\n\n os.chdir(opts.extract)\n\n ucode_file.seek(patch_start, 0)\n out_file_name = \"mc_patch_0%x.bin\" % (ucode_level)\n out_file = open(out_file_name, \"wb\")\n out_file.write(ucode_file.read(patch_length))\n out_file.close()\n\n print(\" Patch extracted to %s/%s\" % (os.getcwd(), out_file_name))\n\n os.chdir(cwd)",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst",
"def install(src, dst):\n try:\n dst = os.path.join(install_dir, dst, os.path.basename(src))\n src = os.path.join(source_dir, src)\n assert os.path.isfile(src)\n assert not os.path.isdir(dst)\n if not os.path.isdir(os.path.dirname(dst)):\n os.makedirs(os.path.dirname(dst))\n shutil.copy(src, dst)\n print 'Installed', dst\n except Exception:\n print 'Could not install', dst",
"def install(src, perm, dest, cmds, comp, verbose=False):\n if comp == Cmp.nosrc:\n ansiprint(\n f\"The source file '{src}' does not exist.\", fg=Color.black, bg=Color.red\n )\n elif comp == Cmp.same:\n return\n try:\n if os.path.exists(dest):\n os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR)\n copyfile(src, dest)\n os.chmod(dest, perm)\n if cmds and subprocess.call(cmds) != 0:\n ansiprint(f\"Post-install commands for {dest} failed.\", fg=Color.red)\n except Exception as e:\n ansiprint(f\"Installing '{src}' as '{dest}' failed: {e}\", fg=Color.red)\n return\n ansiprint(f\"File '{src}' was successfully installed as '{dest}'.\", fg=Color.green)",
"def _import_from(mod, path, mod_dir=None):\n\n if mod_dir is None:\n mod_dir = mod\n\n if not os.path.exists(path):\n return None\n\n if not os.path.exists(os.path.join(path, mod_dir)):\n return None\n\n try:\n mod_info = imp.find_module(mod_dir, [path])\n return imp.load_module(mod, *mod_info)\n except ImportError:\n return None",
"def get_ifmod(self, aug_conf_path: str, mod: str) -> str:\n if_mods = self.aug.match((\"%s/IfModule/*[self::arg='%s']\" %\n (aug_conf_path, mod)))\n if not if_mods:\n return self.create_ifmod(aug_conf_path, mod)\n\n # Strip off \"arg\" at end of first ifmod path\n return if_mods[0].rpartition(\"arg\")[0]",
"def extract_archive(\n fname: str, outfile: Optional[str] = None, concat: bool = False\n) -> Union[str, None]:\n if fname.endswith((\".tgz\", \".tar.gz\")):\n return extract_tarball(fname, outfile=outfile)\n elif fname.endswith(\".gz\"):\n return extract_gzip(fname, outfile=outfile)\n elif fname.endswith(\n \".zip\",\n ):\n return extract_zip(fname, outfile=outfile, concat=concat)",
"def clone(src: str, dst: str):\n if dst is None:\n dst = getcwd()\n destination = path.abspath(dst)\n # TODO: replace with false this is just for testing:\n makedirs(destination, exist_ok=True)\n\n sync_chunk(src, destination)\n copy(src, destination)",
"def extract(cls, path, outdir):\r\n raise NotImplementedError()",
"def check_manifest_and_rename(self, required_digest, tmp, extract = None, try_helper = False):\n\t\tif extract:\n\t\t\textracted = os.path.join(tmp, extract)\n\t\t\tif not os.path.isdir(extracted):\n\t\t\t\traise Exception(_('Directory %s not found in archive') % extract)\n\t\telse:\n\t\t\textracted = tmp\n\n\t\tfrom . import manifest\n\n\t\tmanifest.fixup_permissions(extracted)\n\n\t\talg, required_value = manifest.splitID(required_digest)\n\t\tactual_digest = alg.getID(manifest.add_manifest_file(extracted, alg))\n\t\tif actual_digest != required_digest:\n\t\t\traise BadDigest(_('Incorrect manifest -- archive is corrupted.\\n'\n\t\t\t\t\t'Required digest: %(required_digest)s\\n'\n\t\t\t\t\t'Actual digest: %(actual_digest)s\\n') %\n\t\t\t\t\t{'required_digest': required_digest, 'actual_digest': actual_digest})\n\n\t\tif try_helper:\n\t\t\tif self._add_with_helper(required_digest, extracted):\n\t\t\t\tsupport.ro_rmtree(tmp)\n\t\t\t\treturn\n\t\t\tinfo(_(\"Can't add to system store. Trying user store instead.\"))\n\n\t\tinfo(_(\"Caching new implementation (digest %s) in %s\"), required_digest, self.dir)\n\n\t\tfinal_name = os.path.join(self.dir, required_digest)\n\t\tif os.path.isdir(final_name):\n\t\t\traise Exception(_(\"Item %s already stored.\") % final_name) # XXX: not really an error\n\n\t\t# If we just want a subdirectory then the rename will change\n\t\t# extracted/.. and so we'll need write permission on 'extracted'\n\n\t\tos.chmod(extracted, 0o755)\n\t\tos.rename(extracted, final_name)\n\t\tos.chmod(final_name, 0o555)\n\n\t\tif extract:\n\t\t\tos.rmdir(tmp)",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst",
"def _install_file(srcdir, filename, dstdir):\n srcfilename = os.path.join(srcdir, filename)\n dstfilename = os.path.join(dstdir, filename)\n if not os.path.exists(srcfilename):\n if os.path.exists(dstfilename):\n subprocess.run(['rm', dstfilename], check=True)\n return (False, True)\n return (False, False)\n\n equal = subprocess.run(['diff', '-q', srcfilename, dstfilename],\n check=False,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL).returncode == 0\n if not equal:\n subprocess.run(['mv', srcfilename, dstfilename], check=True)\n return (True, not equal)",
"def create_ifmod(self, aug_conf_path: str, mod: str) -> str:\n c_path = \"{}/IfModule[last() + 1]\".format(aug_conf_path)\n c_path_arg = \"{}/IfModule[last()]/arg\".format(aug_conf_path)\n self.aug.set(c_path, \"\")\n retpath = \"{}/IfModule[last()]/\".format(aug_conf_path)\n self.aug.set(c_path_arg, mod)\n return retpath",
"def extract(fspec: pathlib.Path, dspec: pathlib.Path) -> bool:\n try:\n os.makedirs(str(dspec))\n except FileExistsError:\n pass\n try:\n with zipfile.ZipFile(str(fspec), \"r\") as f:\n f.extractall(str(dspec))\n return True\n except Exception: # noqa\n return False",
"def download_file(src, dst):\n subprocess.check_output(cmd_preamble + [\"cp\", f\"jot://{src}\", dst])",
"def check_extract_from_egg(pth, todir=None):\n rv = []\n if os.path.altsep:\n pth = pth.replace(os.path.altsep, os.path.sep)\n components = pth.split(os.path.sep)\n for i, name in enumerate(components):\n if name.lower().endswith(\".egg\"):\n eggpth = os.path.sep.join(components[:i + 1])\n if os.path.isfile(eggpth):\n # eggs can also be directories!\n try:\n egg = zipfile.ZipFile(eggpth)\n except zipfile.BadZipfile as e:\n raise SystemExit(\"Error: %s %s\" % (eggpth, e))\n if todir is None:\n # Use the same directory as setuptools/pkg_resources. So,\n # if the specific egg was accessed before (not necessarily\n # by pyinstaller), the extracted contents already exist\n # (pkg_resources puts them there) and can be used.\n todir = os.path.join(pkg_resources_get_default_cache(),\n name + \"-tmp\")\n if components[i + 1:]:\n members = [\"/\".join(components[i + 1:])]\n else:\n members = egg.namelist()\n for member in members:\n pth = os.path.join(todir, member)\n if not os.path.isfile(pth):\n dirname = os.path.dirname(pth)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(pth, \"wb\") as f:\n f.write(egg.read(member))\n rv.append((pth, eggpth, member))\n return rv\n return [(pth, None, None)]",
"def unpack(filename: Union[str, Path], extract_to: Union[str, Path]) -> None:\n raise NotImplemented",
"def extract(self, condition='*', path='.', withSubpath=True, overwrite=True):\r\n checker = condition2checker(condition)\r\n return RarFileImplementation.extract(self, checker, path, withSubpath, overwrite)",
"def SshExtractZip(host, zipname, dst):\n command = ['ssh', host, 'unzip', '-o', '-d', dst, zipname]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh unzip -o -d \"%s\" \"%s\" on \"%s\" (%s)' %\n (dst, zipname, host, result))\n\n # unzip will create directories with access 700, which is not often what we\n # need. Fix the permissions for the whole archive.\n command = ['ssh', host, 'chmod', '-R', '755', dst]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh chmod -R 755 \"%s\" on \"%s\" (%s)' %\n (dst, host, result))",
"def resolve(impmod, nameparts):\n if not nameparts:\n return None\n\n m = impmod\n for nname in nameparts:\n m = getattr(m, nname, None)\n if m is None:\n break\n\n return m",
"def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,\n link=None, verbose=1, dry_run=0):\n # XXX if the destination file already exists, we clobber it if\n # copying, but blow up if linking. Hmmm. And I don't know what\n # macostools.copyfile() does. Should definitely be consistent, and\n # should probably blow up if destination exists and we would be\n # changing it (ie. it's not already a hard/soft link to src OR\n # (not update) and (src newer than dst).\n\n import os\n from distutils.file_util import _copy_file_contents, _copy_action\n from distutils import log\n\n from distutils.dep_util import newer\n from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE\n\n if os.path.islink(src):\n if os.path.exists(dst):\n os.unlink(dst)\n os.symlink(os.readlink(src), dst)\n return (dst, 1)\n\n if not os.path.isfile(src):\n raise DistutilsFileError(\n \"can't copy '%s': doesn't exist or not a regular file\" % src)\n\n if os.path.isdir(dst):\n dir = dst\n dst = os.path.join(dst, os.path.basename(src))\n else:\n dir = os.path.dirname(dst)\n\n if update and not newer(src, dst):\n if verbose >= 1:\n log.debug(\"not copying %s (output up-to-date)\", src)\n return (dst, 0)\n\n try:\n action = _copy_action[link]\n except KeyError:\n raise ValueError(\"invalid value '%s' for 'link' argument\" % link)\n\n if verbose >= 1:\n if os.path.basename(dst) == os.path.basename(src):\n log.info(\"%s %s -> %s\", action, src, dir)\n else:\n log.info(\"%s %s -> %s\", action, src, dst)\n\n if dry_run:\n return (dst, 1)\n\n # If linking (hard or symbolic), use the appropriate system call\n # (Unix only, of course, but that's the caller's responsibility)\n elif link == 'hard':\n if not (os.path.exists(dst) and os.path.samefile(src, dst)):\n try:\n os.link(src, dst)\n return (dst, 1)\n except OSError:\n # If hard linking fails, fall back on copying file\n # (some special filesystems don't support hard linking\n # even under Unix, see issue #8876).\n pass\n elif link == 'sym':\n if not (os.path.exists(dst) and os.path.samefile(src, dst)):\n os.symlink(src, dst)\n return (dst, 1)\n\n # Otherwise (non-Mac, not linking), copy the file contents and\n # (optionally) copy the times and mode.\n _copy_file_contents(src, dst)\n if preserve_mode or preserve_times:\n st = os.stat(src)\n\n # According to David Ascher <[email protected]>, utime() should be done\n # before chmod() (at least under NT).\n if preserve_times:\n os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))\n if preserve_mode:\n os.chmod(dst, S_IMODE(st[ST_MODE]))\n\n return (dst, 1)",
"def do_install(self, args):\n if args:\n try:\n plugin_name, file_path = args.split()[0], args.split()[1]\n except Exception as e:\n return print(display_messages(\"the argument is invalid please type ?install for more information\", error=True))\n if not path.isfile(file_path):\n return print(\n display_messages(\n \"the file {} not found \".format(file_path), error=True\n )\n )\n head, tail = os.path.split(file_path)\n dest = copyfile(file_path, \"{}/{}\".format(self.temp_path, tail))\n print(display_messages(\"copy content file .zip to {}\".format(dest), info=True))\n \n path_to_zip_file = tempfile.gettempdir() + \"/{}\".format(tail)\n with ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(tempfile.gettempdir())\n temp_path_file_extracted = \"{}/{}.py\".format(self.temp_path, plugin_name)\n print(\n display_messages(\n \"extracted files on : {}\".format(temp_path_file_extracted), info=True\n )\n )\n if not path.isfile(temp_path_file_extracted):\n return print(\n display_messages(\n \"the file {} not found \".format(temp_path_file_extracted), error=True\n )\n )\n temp_templates_path = \"{}/{}\".format(self.temp_path, plugin_name)\n if not path.isdir(temp_templates_path):\n return print(\n display_messages(\n \"the directory template {} not found \".format(temp_templates_path), error=True\n )\n )\n source = temp_path_file_extracted\n destination = \"{}/{}.py\".format(self.captiveflask_setup_path, plugin_name)\n dest = copyfile(source, destination)\n print(display_messages(\"copy content file to {}\".format(dest), info=True))\n\n copy_tree(\n temp_templates_path, C.user_config_dir + \"/config/templates/{}\".format(plugin_name)\n )\n print(\n display_messages(\n \"plugin {} install {}\".format( plugin_name,setcolor(\"sucessful\", color=\"green\")),\n info=True,\n )\n )\n return \n print(\n display_messages(\"unknown command: {} \".format(args), error=True)\n )",
"def extract(filename, code, into):\n\n _, ext = os.path.splitext(filename)\n user_code_dir = os.path.join(into, 'user_code')\n os.mkdir(user_code_dir)\n contents = code\n\n if ext in ZIPS:\n # it's a zip file\n zip_file = os.path.join(into, 'contents.zip')\n with open(zip_file, 'w') as f:\n f.write(contents)\n zip = zipfile.ZipFile(zip_file)\n zip.extractall(user_code_dir)\n\n elif ext in TARBALLS:\n # it's a tarball\n tarball = os.path.join(into, 'contents.tgz')\n with open(tarball, 'w') as f:\n f.write(contents)\n tar = tarfile.open(tarball)\n tar.extractall(user_code_dir)\n\n elif ext in EXTENSIONS.keys():\n # it's a module\n module = os.path.join(user_code_dir, filename)\n with open(module, 'w') as f:\n f.write(contents)\n\n else:\n raise APIException(\n 'unknown extension: {0}'.format(filename), 400)\n\n return user_code_dir",
"def copy_dir(src=\"\", dst=\"\", header=\"\", footer=\"\", clip=0, ext=\"\", test=False):\n failed = []\n nfiles = 0\n if not os.path.exists(dst):\n os.makedirs(dst)\n if not os.path.exists(src):\n raise argparse.ArgumentError(\"source does not exist! It must be a directory.\")\n else:\n for root, dirs, files in os.walk(src, topdown=False):\n for name in files:\n name_wo_ext, file_ext = os.path.splitext(name)\n\n src_path = os.path.join(root, name)\n dstfilename = header + os.path.join(root[len(src)+1:], name_wo_ext[clip:]) + footer + file_ext\n dst_path = os.path.join(dst, dstfilename)\n\n dst_pdir = os.path.dirname(dst_path)\n if not os.path.exists(dst_pdir):\n os.makedirs(dst_pdir)\n\n if not os.path.exists(dst_path):\n if ext == \"\" or ext == file_ext[1:]:\n try:\n shutil.copy(src_path, dst_path)\n except:\n failed.append(src_path)\n print(f\"... {src_path} failed\")\n else:\n print(f\"... {dst_path} already exists'. Skipping\")\n nfiles += 1\n\n if test:\n break\n if test:\n break\n print(f\"{nfiles - len(failed)} / {nfiles} files were copied.\")\n return failed"
] | [
"0.5237817",
"0.5110865",
"0.47217962",
"0.47020456",
"0.46294928",
"0.45337695",
"0.44764635",
"0.44509658",
"0.44130152",
"0.43863454",
"0.43481404",
"0.4293722",
"0.42685872",
"0.42552644",
"0.4250608",
"0.423482",
"0.42136598",
"0.42102575",
"0.42053008",
"0.41918778",
"0.4166381",
"0.41541207",
"0.41426513",
"0.4141138",
"0.41381654",
"0.41336548",
"0.41293153",
"0.4124877",
"0.40945008",
"0.40741172"
] | 0.8552399 | 0 |
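The ExtractMod record's 'range' argument is a two-tuple unpacked into range() to verify that every numbered part of a multi-part archive exists before installing. A small sketch of that existence check, with a hypothetical filename template and helper name:

import os
from typing import Optional, Tuple

def mod_parts_exist(template: str, parts: Optional[Tuple[int, int]] = None) -> bool:
    # With no range, check the single file; otherwise check each numbered part.
    names = [template] if parts is None else [template.format(n) for n in range(*parts)]
    return all(os.path.isfile(name) for name in names)

# Example: mod_parts_exist("mod.part{}.rar", (1, 4)) checks parts 1 through 3.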
ExtractFolder(path) Extracts all the archives from a folder into that same folder. Returns a tuple of all the resulting folders' names. | def ExtractFolder(path):
if not path.endswith(("/", "\\")):
path = path + "\\"
folders = []
files = []
for file in os.listdir(path):
files.append(path + file)
_file, ext = GetName(file)
folder = ExtractFile(path + file)
CopyFolder(folder, path + _file)
folders.append(path + _file)
DeleteFile(*files)
return tuple(folders) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_archive(path: str, extracted_dir_path: str) -> str:\n logging.info('extracting %s to %s', path, extracted_dir_path)\n with tarfile.open(path) as tar:\n tar.extractall(path=extracted_dir_path)\n extracted_items = os.listdir(extracted_dir_path)\n if len(extracted_items) != 1:\n raise ValueError(\n 'archive at {} did not contain a single directory'.format(path))\n return os.path.join(extracted_dir_path, extracted_items[0])",
"def extract(self, archive_path: str, extracted_path: str) -> None:\n if not os.listdir(archive_path):\n self.log.warning(\n \"No files found in directory: {}\".format(archive_path))\n return\n\n for root, _, archive_files in os.walk(archive_path):\n if not archive_files:\n continue\n\n extract_to = os.path.normpath(os.path.join(\n extracted_path,\n os.path.relpath(root, archive_path)\n ))\n if not os.path.isdir(extract_to):\n os.makedirs(extract_to)\n\n for zfile in archive_files:\n zfile = os.path.join(root, zfile)\n filename, ext = os.path.splitext(os.path.basename(zfile))\n # unzip (tree) each archive file in archive_path\n if ext in self.zip_ext:\n # double splitext for .tar.gz\n fname, ext = os.path.splitext(os.path.basename(filename))\n if ext == '.tar':\n filename = fname\n self.log.info(\"Extracting from: {}\".format(zfile))\n self.log.info(\" Extracting to: {}\".format(\n os.path.join(extract_to, filename)))\n unzip(\n zfile,\n extract_to,\n zip_ext=self.zip_ext,\n create_own_folder=True,\n tree=True\n )\n\n # move each non-archive file in archive_path\n else:\n dest = os.path.join(extract_to, os.path.basename(zfile))\n self.log.info(\"Copying from: {}\".format(zfile))\n self.log.info(\" Copying to: {}\".format(dest))\n shutil.copy(zfile, dest)",
"def extract(cls, path, outdir):\r\n with open_zip(path) as zip:\r\n for path in zip.namelist():\r\n # While we're at it, we also perform this safety test.\r\n if path.startswith('/') or path.startswith('..'):\r\n raise ValueError('Zip file contains unsafe path: %s' % path)\r\n # Ignore directories. extract() will create parent dirs as needed.\r\n if not path.endswith('/'):\r\n zip.extract(path, outdir)",
"def download_extract(name, folder=None): #@save\n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir",
"def download_extract(name, folder=None):\n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir",
"def extract_zip(zip_path, target_folder):\n with zipfile.ZipFile(zip_path) as archive:\n archive.extractall(target_folder)",
"async def unarchive_dir(\n archive_to_extract: Path,\n destination_folder: Path,\n *,\n max_workers: int = _MAX_UNARCHIVING_WORKER_COUNT,\n progress_bar: ProgressBarData | None = None,\n log_cb: Callable[[str], Awaitable[None]] | None = None,\n) -> set[Path]:\n if not progress_bar:\n progress_bar = ProgressBarData(steps=1)\n async with AsyncExitStack() as zip_stack:\n zip_file_handler = zip_stack.enter_context(\n zipfile.ZipFile( # pylint: disable=consider-using-with\n archive_to_extract,\n mode=\"r\",\n )\n )\n zip_stack.enter_context(logging_redirect_tqdm())\n process_pool = zip_stack.enter_context(\n non_blocking_process_pool_executor(max_workers=max_workers)\n )\n\n # running in process poll is not ideal for concurrency issues\n # to avoid race conditions all subdirectories where files will be extracted need to exist\n # creating them before the extraction is under way avoids the issue\n # the following avoids race conditions while unzippin in parallel\n _ensure_destination_subdirectories_exist(\n zip_file_handler=zip_file_handler,\n destination_folder=destination_folder,\n )\n\n futures: list[asyncio.Future] = [\n asyncio.get_event_loop().run_in_executor(\n process_pool,\n # ---------\n _zipfile_single_file_extract_worker,\n archive_to_extract,\n zip_entry,\n destination_folder,\n zip_entry.is_dir(),\n )\n for zip_entry in zip_file_handler.infolist()\n ]\n\n try:\n extracted_paths: list[Path] = []\n total_file_size = sum(\n zip_entry.file_size for zip_entry in zip_file_handler.infolist()\n )\n async with AsyncExitStack() as progress_stack:\n sub_prog = await progress_stack.enter_async_context(\n progress_bar.sub_progress(steps=total_file_size)\n )\n tqdm_progress = progress_stack.enter_context(\n tqdm.tqdm(\n desc=f\"decompressing {archive_to_extract} -> {destination_folder} [{len(futures)} file{'s' if len(futures) > 1 else ''}\"\n f\"/{_human_readable_size(archive_to_extract.stat().st_size)}]\\n\",\n total=total_file_size,\n **_TQDM_MULTI_FILES_OPTIONS,\n )\n )\n for future in asyncio.as_completed(futures):\n extracted_path = await future\n extracted_file_size = extracted_path.stat().st_size\n if tqdm_progress.update(extracted_file_size) and log_cb:\n with log_catch(log, reraise=False):\n await log_cb(f\"{tqdm_progress}\")\n await sub_prog.update(extracted_file_size)\n extracted_paths.append(extracted_path)\n\n except Exception as err:\n for f in futures:\n f.cancel()\n\n # wait until all tasks are cancelled\n await asyncio.wait(\n futures, timeout=2 * _MIN, return_when=asyncio.ALL_COMPLETED\n )\n\n # now we can cleanup\n if destination_folder.exists() and destination_folder.is_dir():\n await remove_directory(destination_folder, ignore_errors=True)\n\n raise ArchiveError(\n f\"Failed unarchiving {archive_to_extract} -> {destination_folder} due to {type(err)}.\"\n f\"Details: {err}\"\n ) from err\n\n # NOTE: extracted_paths includes all tree leafs, which might include files and empty folders\n return {\n p\n for p in extracted_paths\n if p.is_file() or (p.is_dir() and not any(p.glob(\"*\")))\n }",
"def StripFolder(path):\n\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n folders = [path]\n allf = []\n while folders:\n folder = folders.pop(0)\n allf.append(folder)\n for lister in os.listdir(folder):\n if os.path.isdir(folder + lister):\n folders.append(folder + lister + \"\\\\\")\n elif not path == folder:\n CopyFolder(folder, path)\n shutil.rmtree(folder)\n\n return tuple(allf)",
"def unpack_archive(\n filepath: types.PathLike, *, extract_dir: Optional[types.PathLike] = None\n) -> types.PathLike:\n filepath = utils.to_path(filepath).resolve()\n if not extract_dir:\n extract_dir = str(filepath.parent)\n filepath = str(filepath)\n os.makedirs(extract_dir, exist_ok=True)\n is_zipfile = zipfile.is_zipfile(filepath)\n is_tarfile = tarfile.is_tarfile(filepath)\n if not is_zipfile and not is_tarfile:\n LOGGER.debug(\"'%s' is not an archive\", filepath)\n return extract_dir\n else:\n LOGGER.info(\"extracting data from archive file '%s'\", filepath)\n shutil.unpack_archive(filepath, extract_dir=extract_dir, format=None)\n # we want to rename the unpacked directory to a consistent value\n # unfortunately, shutil doesn't pass this back to us\n # so, we get the root path of all the constituent members\n if is_zipfile:\n with zipfile.ZipFile(filepath, mode=\"r\") as zf:\n members = zf.namelist()\n else:\n with tarfile.open(filepath, mode=\"r\") as tf:\n members = tf.getnames()\n src_basename = os.path.commonpath(members)\n dest_basename = os.path.basename(filepath)\n if src_basename:\n while True:\n tmp, _ = os.path.splitext(dest_basename)\n if tmp == dest_basename:\n break\n else:\n dest_basename = tmp\n if src_basename != dest_basename:\n return shutil.move(\n os.path.join(extract_dir, src_basename),\n os.path.join(extract_dir, dest_basename),\n )\n else:\n return os.path.join(extract_dir, src_basename)\n else:\n return extract_dir",
"def parse_folder(self, path):\n\n data = []\n for filename in os.listdir(path):\n data.append(self.parse_file(os.path.join(path, filename), filename))\n return data",
"def unzip(path, filename_as_folder=False):\n for filename in os.listdir(path):\n if filename.endswith(\".zip\"):\n name = os.path.splitext(os.path.basename(filename))[0]\n if not os.path.isdir(name):\n try:\n file = os.path.join(path, filename)\n zip = ZipFile(file)\n if filename_as_folder:\n directory = os.path.join(path, name)\n os.mkdir(directory)\n print(\"Unzipping {} to {}\".format(filename, directory))\n zip.extractall(directory)\n else:\n print(\"Unzipping {} to {}\".format(filename, path))\n zip.extractall(path)\n except BadZipfile:\n print(\"BAD ZIP: \" + filename)\n try:\n os.remove(file)\n except OSError as e: # this would be \"except OSError, e:\" before Python 2.6\n if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory\n raise # re-raise exception if a different error occured",
"def unzip_file(path_to_zip_file, directory_to_extract_to):\n \n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n return",
"def extract(archive_path, images_dir, test_zip=False):\n log(\"TRACE\", \"Attempting to extracted files from {}\".format(archive_path))\n with zipfile.ZipFile(archive_path) as images_zip:\n # Check that the Zip file is valid, in which case `testzip()` returns\n # None. If it's bad, that function will return a list of bad files\n try:\n if test_zip and images_zip.testzip():\n log(\"ERROR\", \"Could not extract the following invalid Zip file:\"\n \" {}\".format(archive_path))\n return []\n except OSError:\n log(\"ERROR\", \"Could not extract the following invalid Zip file:\"\n \" {}\".format(archive_path))\n return []\n images_zip.extractall(images_dir)\n archive_namelist = images_zip.namelist()\n log(\"TRACE\", \"Extracted files: {}\".format(archive_namelist))\n return archive_namelist",
"def uncompress(path, dest=None, remove=True):\n # assert path.endswith('.zip')\n prefix, ext = split_if_compressed(path)\n folder_name = os.path.basename(prefix)\n file_is_zip = ext == '.zip'\n root_of_folder = None\n if ext == '.gz':\n try:\n with gzip.open(path, 'rb') as f_in, open(prefix, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n remove_file(prefix)\n remove_file(path)\n raise e\n else:\n try:\n with zipfile.ZipFile(path, \"r\") if ext == '.zip' else tarfile.open(path, 'r:*') as archive:\n if not dest:\n namelist = sorted(archive.namelist() if file_is_zip else archive.getnames())\n if namelist[0] == '.':\n namelist = namelist[1:]\n namelist = [p[len('./'):] if p.startswith('./') else p for p in namelist]\n if ext == '.tgz':\n roots = set(x.split('/')[0] for x in namelist)\n if len(roots) == 1:\n root_of_folder = next(iter(roots))\n else:\n # only one file, root_of_folder = ''\n root_of_folder = namelist[0].strip('/') if len(namelist) > 1 else ''\n if all(f.split('/')[0] == root_of_folder for f in namelist[1:]) or not root_of_folder:\n dest = os.path.dirname(path) # only one folder, unzip to the same dir\n else:\n root_of_folder = None\n dest = prefix # assume zip contains more than one file or folder\n print('Extracting {} to {}'.format(path, dest))\n archive.extractall(dest)\n if root_of_folder:\n if root_of_folder != folder_name:\n # move root to match folder name\n os.rename(path_join(dest, root_of_folder), path_join(dest, folder_name))\n dest = path_join(dest, folder_name)\n elif len(namelist) == 1:\n dest = path_join(dest, namelist[0])\n except Exception as e:\n remove_file(path)\n if os.path.exists(prefix):\n if os.path.isfile(prefix):\n os.remove(prefix)\n elif os.path.isdir(prefix):\n shutil.rmtree(prefix)\n raise e\n if remove:\n remove_file(path)\n return dest",
"def parse_folder(file_folder: str) -> Tuple[list, list, list]:\n\n raw_files = [\n _\n for _ in os.listdir(file_folder)\n if _.lower().endswith(\".raw\") or _.lower().endswith(\".d\") or _.lower().endswith(\".mzml\")\n ]\n fasta_files = [_ for _ in os.listdir(file_folder) if _.lower().endswith(\".fasta\")]\n db_files = [\n _ for _ in os.listdir(file_folder) if _.lower().endswith(\".db_data.hdf\")\n ]\n\n return raw_files, fasta_files, db_files",
"def unpack_archive(\r\n file_path: str, unpack_path: str = None, remove_if_exists: bool = False\r\n) -> Optional[str]:\r\n import tarfile\r\n import zipfile\r\n\r\n if not os.path.isfile(file_path):\r\n log.warning(\"File does not exist: \" + file_path)\r\n return None\r\n\r\n if zipfile.is_zipfile(file_path):\r\n unpack_path = extract_zip(file_path, unpack_path, remove_if_exists)\r\n elif tarfile.is_tarfile(file_path):\r\n unpack_path = extract_tar(file_path, unpack_path, remove_if_exists)\r\n else:\r\n unpack_path = extract_via_patoolib(file_path, unpack_path, remove_if_exists)\r\n\r\n if unpack_path and os.path.isdir(unpack_path):\r\n unpack_folder_name = os.path.basename(unpack_path)\r\n if len(os.listdir(unpack_path)) == 1 and unpack_folder_name in os.listdir(\r\n unpack_path\r\n ):\r\n # unpacked folder contains one folder with same name -> move content to higher up folder\r\n folder_to_move = os.path.join(unpack_path, unpack_folder_name)\r\n files = os.listdir(folder_to_move)\r\n for f in files:\r\n shutil.move(os.path.join(folder_to_move, f), unpack_path)\r\n\r\n # Remove empty folder\r\n if len(os.listdir(folder_to_move)) == 0:\r\n os.rmdir(folder_to_move)\r\n else:\r\n log.info(\"Folder content was moved but folder is not empty.\")\r\n\r\n return unpack_path",
"def search_zip(folder: Path, file_type: str) -> list[Path]:\n result = []\n for folder_item in folder.iterdir():\n # Is it a directory?\n if folder_item.is_dir():\n result.extend(search_zip(folder_item, file_type))\n # Is it a file?\n if folder_item.is_file():\n if folder_item.name.endswith(file_type):\n result.append(folder_item)\n return result",
"def read_foldername(folder):\n match = re.search(r'(\\d+)\\s-\\s(\\d\\d\\d\\d)-(\\d\\d)\\s-\\s(.+).*\\s-\\s(.+)\\s--\\s(.+)', folder)\n if match is not None:\n foldertuple = match.groups()\n else:\n match = re.search(r'(\\d+)\\s-\\s(\\d\\d\\d\\d)-(\\d\\d)\\s-\\s()(.*)\\s--\\s(.+)', folder)\n if match is not None:\n foldertuple = match.groups()\n else:\n match = re.search(r'(\\d+)\\s-\\s(\\d\\d\\d\\d)-(\\d\\d)()()()', folder)\n if match is not None:\n foldertuple = match.groups()\n else:\n foldertuple = None\n return foldertuple",
"def get_folder_hash(cls, folder_dir):\n\n hash_dict = {}\n\n for path, dirs, files in os.walk(folder_dir, topdown=False):\n\n current_dir = path.split('/')[-1]\n\n # extracted folders are never stored\n if '.extract/' in path or current_dir == '.extract':\n continue\n\n files_list = [file for file in files if file != 'hash']\n\n for file in files_list:\n file_path = '%s/%s' % (path, file)\n hash_dict[file_path] = cls.create_file_hash_dict(file, file_path)\n\n filtered_dirs = [directory for directory in dirs if directory != '.extract']\n hash_dict[path] = cls.create_tree_hash_dict(current_dir, path, filtered_dirs, files_list, hash_dict)\n\n return hash_dict[folder_dir]['hash'], hash_dict",
"def unzip_file(zip_path, directory_to_extract_to):\n ensure_dir(directory_to_extract_to)\n with zipfile.ZipFile(file=zip_path) as zip_file:\n # Loop over each file\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n try:\n zip_file.extract(member=file, path=directory_to_extract_to)\n except BadZipFile as e:\n print(e)",
"def get_folder_filenames(path):\n result = {}\n for parent, dirnames, filenames in os.walk(path):\n if not dirnames:\n result[os.path.split(parent)[-1]] = filenames\n return result",
"def extract(apath, ffilter=[]):\n\n files = []\n\n def extract_recursive(curr_apath):\n \"\"\"Look into archive recursively to extract files considering ffilter\"\"\"\n\n handler = resolve_format(curr_apath)\n unpacker = HandlersFactory.get_handler(handler)\n _files = unpacker.files_list(curr_apath)\n\n for f in _files:\n if is_matched(f, ffilter=ffilter):\n _fpath = unpacker.extract(curr_apath, f)\n files.append(_fpath)\n if is_archive(f):\n _apath = unpacker.extract(curr_apath, f)\n extract_recursive(_apath)\n\n extract_recursive(apath)\n return files",
"def zip_folder(\r\n folder_path: str,\r\n archive_file_name: str = None,\r\n max_file_size: int = None,\r\n excluded_folders: List[str] = None,\r\n compression: int = zipfile.ZIP_STORED,\r\n) -> Optional[str]:\r\n # TODO accept names with wildcards in exclude like for tar\r\n if not os.path.isdir(folder_path):\r\n log.info(\"Failed to zip (not a directory): \" + folder_path)\r\n return None\r\n\r\n temp_folder = tempfile.mkdtemp()\r\n\r\n if max_file_size:\r\n max_file_size = max_file_size * 1000000 # MB ro bytes\r\n\r\n def cleanup():\r\n log.info(\"Removing temp directory: \" + temp_folder)\r\n shutil.rmtree(temp_folder)\r\n\r\n atexit.register(cleanup)\r\n\r\n if not archive_file_name:\r\n archive_file_name = os.path.basename(folder_path) + \".zip\"\r\n\r\n zip_file_path = os.path.join(temp_folder, archive_file_name)\r\n log.debug(\"Zipping folder: \" + folder_path + \" to \" + zip_file_path)\r\n zip_file = zipfile.ZipFile(zip_file_path, \"w\", compression)\r\n\r\n # dont packge folder inside, only package everything inside folder\r\n for dirname, subdirs, files in os.walk(folder_path):\r\n if excluded_folders:\r\n for excluded_folder in excluded_folders:\r\n if excluded_folder in subdirs:\r\n log.debug(\"Ignoring folder because of name: \" + excluded_folder)\r\n subdirs.remove(excluded_folder)\r\n if dirname != folder_path:\r\n # only write if dirname is not the root folder\r\n zip_file.write(dirname, os.path.relpath(dirname, folder_path))\r\n for filename in files:\r\n if max_file_size and max_file_size < os.path.getsize(\r\n os.path.join(dirname, filename)\r\n ):\r\n # do not write file if it is bigger than\r\n log.debug(\"Ignoring file because of file size: \" + filename)\r\n continue\r\n file_path = os.path.join(dirname, filename)\r\n zip_file.write(file_path, os.path.relpath(file_path, folder_path))\r\n zip_file.close()\r\n\r\n return zip_file_path",
"def get_folder_info(path: str):\n try:\n folder_list = next(os.walk(path))\n return (folder_list[1], folder_list[2])\n except TypeError:\n print(TypeError)",
"def zipDirectory(folder_path, zip_path):\r\n # Create a ZipFile object\r\n with ZipFile(zip_path, mode='w') as zipObj:\r\n # Iterate over all the files in directory\r\n for folderName, subfolders, filenames in os.walk(folder_path):\r\n for filename in filenames:\r\n # Filter on TSV files\r\n if filename.endswith(\".tsv\"):\r\n # Create complete filepath of file in directory\r\n filePath = os.path.join(folderName, filename)\r\n # Add file to zip\r\n zipObj.write(filePath, basename(filePath))",
"def extractAll(self, directory):\r\n if self._archive == None:\r\n raise StandardError(\"Archive was not open\")\r\n\r\n self._archive.extractall(directory)",
"def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict",
"def zip_and_delete(folder_path, output_path):\n parent_folder = os.path.dirname(folder_path)\n # Retrieve the paths of the folder contents.\n contents = os.walk(folder_path)\n zipped_files = []\n try:\n zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_DEFLATED)\n for root, folders, files in contents:\n # Include all subfolders, including empty ones.\n for folder_name in folders:\n absolute_path = os.path.join(root, folder_name)\n relative_path = absolute_path.replace(folder_path + '/',\n '')\n print \"Adding '%s' to archive.\" % absolute_path\n zip_file.write(absolute_path, relative_path)\n for file_name in files:\n absolute_path = os.path.join(root, file_name)\n relative_path = absolute_path.replace(folder_path + '/',\n '')\n print \"Adding '%s' to archive.\" % absolute_path\n zip_file.write(absolute_path, relative_path)\n zipped_files.append(absolute_path)\n print \"'%s' created successfully.\" % output_path\n except IOError, message:\n print message\n sys.exit(1)\n except OSError, message:\n print message\n sys.exit(1)\n except zipfile.BadZipfile, message:\n print message\n sys.exit(1)\n finally:\n zip_file.close()\n\n print \"clearing directory: %s\" % folder_path\n shutil.rmtree(folder_path)\n os.mkdir(folder_path)",
"def untar(file_path, extract_folder=None):\n if extract_folder is None:\n extract_folder = os.path.dirname(file_path)\n tar = tarfile.open(file_path)\n tar.extractall(extract_folder)\n tar.close()",
"def zip_folder(folder_path, output_path):\n\n # Note: os.path.relpath() does not exist in Jython.\n # target = os.path.relpath(folder_path, start=os.path.dirname(folder_path))\n target = folder_path[folder_path.rfind(os.sep) + 1:]\n\n # Simple trick to build relative paths\n root_len = folder_path.find(target)\n\n try:\n\n # Open zip file (no compression)\n zip_file = zipfile.ZipFile(output_path, 'w', zipfile.ZIP_STORED, allowZip64=True)\n\n # Now recurse into the folder\n for root, folders, files in os.walk(folder_path):\n\n # We do not process folders. This is only useful to store empty\n # folders to the archive, but 1) jython's zipfile implementation\n # throws:\n #\n # Exception: [Errno 21] Is a directory <directory_name>\n #\n # when trying to write a directory to a zip file (in contrast to \n # Python's implementation) and 2) oBIT does not export empty\n # folders in the first place.\n\n # Build the relative directory path (current root)\n relative_dir_path = os.path.abspath(root)[root_len:]\n\n # If a folder only contains a subfolder, we disrupt the hierarchy,\n # unless we add a file.\n if len(files) == 0:\n touch(os.path.join(root, '~'))\n files.append('~')\n\n # Include all files\n for file_name in files:\n\n # Full file path to add\n full_file_path = os.path.join(root, file_name)\n relative_file_path = os.path.join(relative_dir_path, file_name)\n\n # Workaround problem with file name encoding\n full_file_path = full_file_path.encode('latin-1')\n relative_file_path = relative_file_path.encode('latin-1')\n\n # Write to zip\n zip_file.write(full_file_path, relative_file_path, \\\n zipfile.ZIP_STORED)\n\n except IOError, message:\n raise Exception(message)\n\n except OSError, message:\n raise Exception(message)\n\n except zipfile.BadZipfile, message:\n raise Exception(message)\n\n finally:\n zip_file.close()"
] | [
"0.6846293",
"0.6732933",
"0.61467665",
"0.60470045",
"0.60425836",
"0.5991757",
"0.5988355",
"0.5841082",
"0.5742796",
"0.5741708",
"0.56827486",
"0.5606064",
"0.5600333",
"0.5590889",
"0.55497",
"0.5535603",
"0.5427062",
"0.53971833",
"0.5387293",
"0.531985",
"0.5301058",
"0.52950704",
"0.5292896",
"0.5287323",
"0.52541316",
"0.5248175",
"0.5247449",
"0.5232431",
"0.5223215",
"0.5209623"
] | 0.78496677 | 0 |
ExtractLGP(file, dir=None) Extracts the contents of a LGP archive in a folder. Returns the resulting directory. | def ExtractLGP(file, dir=None):
if dir is None:
p, f = GetFile(file)
dir = var.BOOTLEG_TEMP + f
subprocess.Popen([var.ULGP_LOCATION, "-x", file, "-C", dir])
return dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def RepackLGP(dir, file=None):\n\n if file is None:\n p, f = GetFile(dir)\n if f.endswith((\"/\", \"\\\\\")):\n f = f[:-1]\n file = var.BOOTLEG_TEMP + f + \".lgp\"\n subprocess.Popen([var.ULGP_LOCATION, \"-c\", file, \"-C\", dir])\n return file",
"def download_extract(name, folder=None): #@save\n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir",
"def download_extract(name, folder=None):\n fname = download(name)\n base_dir = os.path.dirname(fname)\n data_dir, ext = os.path.splitext(fname)\n if ext == '.zip':\n fp = zipfile.ZipFile(fname, 'r')\n elif ext in ('.tar', '.gz'):\n fp = tarfile.open(fname, 'r')\n else:\n assert False, 'Only zip/tar files can be extracted.'\n fp.extractall(base_dir)\n return os.path.join(base_dir, folder) if folder else data_dir",
"def extract_from_dir(directory):\n image_regex = re.compile(r'.+\\.jpeg$')\n for root, _, files in os.walk(directory):\n for name in files:\n if image_regex.match(name) != None:\n filename = os.path.join(root, name)\n image = io.imread(filename)\n no_ext, _ = os.path.splitext(name)\n features = extract(image)\n yield (no_ext, features)",
"def _extract_all_gz_in_dir(directory):\n for file in os.listdir(directory):\n if file.endswith(\".gz\"):\n gz_fname = os.path.join(directory, file)\n _extract_gz(gz_fname, os.path.dirname(gz_fname))\n os.remove(gz_fname)",
"def get_gff(url, dir):\n zip_path = ftp_download(url, dir)\n unzip_path = ungzip(zip_path)\n return unzip_path",
"def _extract_archive(path: str, extracted_dir_path: str) -> str:\n logging.info('extracting %s to %s', path, extracted_dir_path)\n with tarfile.open(path) as tar:\n tar.extractall(path=extracted_dir_path)\n extracted_items = os.listdir(extracted_dir_path)\n if len(extracted_items) != 1:\n raise ValueError(\n 'archive at {} did not contain a single directory'.format(path))\n return os.path.join(extracted_dir_path, extracted_items[0])",
"def file_unzipper(directory):\n debug.log(\"Unzipping directory (%s)...\"%directory)\n #FINDING AND UNZIPPING ZIPPED FILES\n for root, dirs, files in os.walk(directory, topdown=False):\n if root != \"\":\n orig_dir = os.getcwd()\n os.chdir(directory)\n Popen('gunzip -q -f *.gz > /dev/null 2>&1', shell=True).wait()\n Popen('unzip -qq -o \"*.zip\" > /dev/null 2>&1', shell=True).wait()\n Popen('rm -f *.zip > /dev/null 2>&1', shell=True).wait()\n os.chdir(orig_dir)",
"def fromDirectory(path, bbox=False, tbox=False, details=False):\n\n logger.info(\"Extracting bbox={} tbox={} from Directory {}\".format(bbox, tbox, path))\n\n if not bbox and not tbox:\n logger.error(\"Require at least one of extraction options, but bbox is {} and tbox is {}\".format(bbox, tbox))\n raise Exception(\"No extraction options enabled!\")\n metadata = {}\n # initialization of later output dict\n metadata_directory = {}\n\n isZip = zipfile.is_zipfile(path)\n\n if isZip:\n logger.info(\"Inspecting zipfile {}\".format(path))\n hf.extract_zip(path)\n extract_folder = path[:-4]\n logger.info(\"Extract_folder zipfile {}\".format(extract_folder))\n path = extract_folder\n\n for filename in os.listdir(path):\n logger.info(\"path {}, folder/zipfile {}\".format(path, filename))\n isZip = zipfile.is_zipfile(os.path.join(path, filename))\n if isZip:\n logger.info(\"**Inspecting folder {}, is zip ? {}**\".format(filename, str(isZip)))\n metadata_directory[filename] = fromDirectory(os.path.join(path, filename), bbox, tbox, details=True)\n else:\n logger.info(\"Inspecting folder {}, is zip ? {}\".format(filename, str(isZip)))\n if os.path.isdir(os.path.join(path, filename)):\n metadata_directory[filename] = fromDirectory(os.path.join(path, filename), bbox, tbox, details=True)\n else:\n metadata_file = fromFile(os.path.join(path, filename), bbox, tbox)\n metadata_directory[str(filename)] = metadata_file\n\n file_format = \"zip\" if isZip else 'folder'\n metadata['format'] = file_format\n\n if bbox:\n bbox_ext = hf.bbox_merge(metadata_directory, path)\n if bbox_ext is not None:\n if len(bbox_ext) != 0:\n metadata['crs'] = bbox_ext['crs']\n metadata['bbox'] = bbox_ext['bbox']\n else:\n logger.warning(\n \"The {} {} has no identifiable bbox - Coordinate reference system (CRS) may be missing\".format(\n file_format, path))\n\n if tbox:\n tbox_ext = hf.tbox_merge(metadata_directory, path)\n if tbox_ext is not None:\n metadata['tbox'] = tbox_ext\n else:\n logger.warning(\"The {} {} has no identifiable time extent\".format(file_format, path))\n\n if details:\n metadata['details'] = metadata_directory\n\n return metadata",
"def unpack_archive(\n filepath: types.PathLike, *, extract_dir: Optional[types.PathLike] = None\n) -> types.PathLike:\n filepath = utils.to_path(filepath).resolve()\n if not extract_dir:\n extract_dir = str(filepath.parent)\n filepath = str(filepath)\n os.makedirs(extract_dir, exist_ok=True)\n is_zipfile = zipfile.is_zipfile(filepath)\n is_tarfile = tarfile.is_tarfile(filepath)\n if not is_zipfile and not is_tarfile:\n LOGGER.debug(\"'%s' is not an archive\", filepath)\n return extract_dir\n else:\n LOGGER.info(\"extracting data from archive file '%s'\", filepath)\n shutil.unpack_archive(filepath, extract_dir=extract_dir, format=None)\n # we want to rename the unpacked directory to a consistent value\n # unfortunately, shutil doesn't pass this back to us\n # so, we get the root path of all the constituent members\n if is_zipfile:\n with zipfile.ZipFile(filepath, mode=\"r\") as zf:\n members = zf.namelist()\n else:\n with tarfile.open(filepath, mode=\"r\") as tf:\n members = tf.getnames()\n src_basename = os.path.commonpath(members)\n dest_basename = os.path.basename(filepath)\n if src_basename:\n while True:\n tmp, _ = os.path.splitext(dest_basename)\n if tmp == dest_basename:\n break\n else:\n dest_basename = tmp\n if src_basename != dest_basename:\n return shutil.move(\n os.path.join(extract_dir, src_basename),\n os.path.join(extract_dir, dest_basename),\n )\n else:\n return os.path.join(extract_dir, src_basename)\n else:\n return extract_dir",
"def unzip_file(path_to_zip_file, directory_to_extract_to):\n \n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n return",
"def extract(file, fileFormat):\n\tspeech.speak(\"Extracting files in \" + file + \".\")\n\tpatoolib.extract_archive(file)",
"def extract_from_dir(directory, importer=import_module):\n\n files = listdir(directory)\n modules = []\n\n head_package = directory.replace('/', '.')\n\n for file in filter(lambda file_name: has_extension(file_name, PY_FILE_EXTS)\n and not file_name.startswith('_'), files):\n package_name = head_package + '.' + remove_extension(file)\n\n modules.append(importer(package_name))\n\n return modules",
"def fetch_data(directory_to_extract_to):\n targetdir = os.path.join(directory_to_extract_to, \"PAMAP2\")\n if os.path.exists(targetdir):\n print('Data previously downloaded and stored in ' + targetdir)\n else:\n os.makedirs(targetdir) # create target directory\n # Download the PAMAP2 data, this is 688 Mb\n path_to_zip_file = os.path.join(directory_to_extract_to, 'PAMAP2_Dataset.zip')\n test_file_exist = os.path.isfile(path_to_zip_file)\n if test_file_exist is False:\n url = str('https://archive.ics.uci.edu/ml/' +\n 'machine-learning-databases/00231/PAMAP2_Dataset.zip')\n # retrieve data from url\n local_fn, headers = urllib.request.urlretrieve(url,\n filename=path_to_zip_file)\n print('Download complete and stored in: ' + path_to_zip_file)\n else:\n print('The data was previously downloaded and stored in ' +\n path_to_zip_file)\n # unzip\n\n with zipfile.ZipFile(path_to_zip_file, \"r\") as zip_ref:\n zip_ref.extractall(targetdir)\n os.remove(path_to_zip_file)\n return targetdir",
"def ExtractFile(file, dst=None, pw=None):\n\n path, file = FindFile(file)\n\n if file.endswith(\".rar\"):\n type = \"rar\"\n elif file.endswith((\".zip\", \".7z\")):\n type = \"zip\"\n else:\n type = None\n\n if dst is None:\n dst = file\n if not dst.endswith((\"/\", \"\\\\\")):\n dst = dst + \"\\\\\"\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n\n if pw is None:\n pw = \"none\"\n\n if type == \"rar\": # Rar file\n subprocess.Popen([var.RAR_LOCATION, \"x\", \"-y\", \"-p\" + pw, path+file, var.BOOTLEG_TEMP + dst]).wait()\n elif type == \"zip\": # Zip file\n subprocess.Popen([var.SEVENZ_LOCATION, \"x\", \"-p\" + pw, \"-y\", \"-o\" + var.BOOTLEG_TEMP + dst, path + file]).wait()\n else: # No type, just copy it over\n shutil.copy(path + file, var.BOOTLEG_TEMP + dst + file)\n\n log.logger(\"PARS_EXTR_FILE\", format=[path + file], display=False)\n return var.BOOTLEG_TEMP + dst",
"def extract_file(path):",
"def extract(cls, path, outdir):\r\n raise NotImplementedError()",
"def download_preprocessed_data(directory_to_extract_to):\n data_path = os.path.join(directory_to_extract_to,\n 'data', 'PAMAP2', 'preprocessed')\n\n if not os.path.isdir(data_path):\n path_to_zip_file = os.path.join(directory_to_extract_to, 'data.zip')\n\n # Download zip file with data\n if not os.path.isfile(path_to_zip_file):\n print(\"Downloading data...\")\n local_fn, headers = urllib.request.urlretrieve(\n 'https://zenodo.org/record/834467/files/data03.zip',\n filename=path_to_zip_file)\n else:\n print(\"Data already downloaded\")\n # Extract the zip file\n with zipfile.ZipFile(path_to_zip_file, \"r\") as zip_ref:\n print(\"Extracting data...\")\n zip_ref.extractall(directory_to_extract_to)\n os.rename(os.path.join(directory_to_extract_to, 'data03'),\n os.path.join(directory_to_extract_to, 'data'))\n print(\"Done\")\n else:\n print(\"Data already downloaded and extracted.\")\n\n return data_path",
"def extract(cls, path, outdir):\r\n with open_zip(path) as zip:\r\n for path in zip.namelist():\r\n # While we're at it, we also perform this safety test.\r\n if path.startswith('/') or path.startswith('..'):\r\n raise ValueError('Zip file contains unsafe path: %s' % path)\r\n # Ignore directories. extract() will create parent dirs as needed.\r\n if not path.endswith('/'):\r\n zip.extract(path, outdir)",
"def extract_zip(file, extract_location):\n\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(extract_location)\n\n print(f\"Extracted file to {extract_location}\")",
"def handle_tmp_extracted(dir: str):\n if not dir.startswith(\"/\"):\n dir = os.path.absname(dir)\n import_name = os.path.basename(dir)\n sub_dir = f\"{dir}/Takeout\"\n assert os.path.isdir(dir) and os.path.isdir(sub_dir)\n assert os.path.isdir(target_annex_dir)\n old_cwd = os.getcwd()\n os.chdir(target_annex_dir)\n for name in os.listdir(sub_dir):\n cmd(\"git-annex\", \"import\", \"--force\", f\"{sub_dir}/{name}\")\n if not is_git_state_clean():\n cmd(\"git\", \"commit\", \"-m\", f\"import Google Takeout {import_name} / {name}\")\n\n os.chdir(old_cwd)\n print(\"Temp extracted dir was:\", dir)",
"def getFiles(directory):\n # os.listdir only for locally downloaded files\n _files=[]\n for item in os.listdir(directory):\n path = os.path.join(directory, item)\n if not os.path.isdir(path) and \".lhe.gz\" in path:\n _files.append(path)\n elif os.path.isdir(path):\n getFiles(path)\n return _files",
"def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:\n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(dir_to_extract_to)\n return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'",
"def unzip_file(zip_path, directory_to_extract_to):\n ensure_dir(directory_to_extract_to)\n with zipfile.ZipFile(file=zip_path) as zip_file:\n # Loop over each file\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n try:\n zip_file.extract(member=file, path=directory_to_extract_to)\n except BadZipFile as e:\n print(e)",
"def gunzip_file(gzip_file, base_dir):\n full_gzip_file = os.path.join(base_dir, gzip_file)\n if not gzip_file.endswith(\".gz\"):\n return gzip_file\n gunzip_file = full_gzip_file.replace(\".gz\", \"\")\n with gzip.open(full_gzip_file, 'rb') as f_in:\n with open(gunzip_file, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n # Now that file is extracted. Remove file\n os.remove(full_gzip_file)\n\n return os.path.basename(gunzip_file)",
"def extractMember(self, directory, name):\r\n if self._archive == None:\r\n raise StandardError(\"Archive was not open\")\r\n\r\n self._archive.extract(name, directory)",
"def extract_file(self, filename):\n unp_bin = os.path.join(self.modulebin, 'unp')\n\n filepath = os.path.dirname(filename)\n uncompressed = ['fasta', 'fa', 'fastq', 'fq', 'fna', 'h5' ]\n supported = ['tar.gz', 'tar.bz2', 'bz2', 'gz', 'lz',\n 'rar', 'tar', 'tgz','zip']\n for ext in uncompressed:\n if filename.endswith('.'+ext):\n return filename\n for ext in supported:\n if filename.endswith('.'+ext):\n extracted_file = filename[:filename.index(ext)-1]\n if os.path.exists(extracted_file): # Check extracted already\n return extracted_file\n logger.info(\"Extracting {}...\".format(filename))\n # p = subprocess.Popen([unp_bin, filename],\n # cwd=filepath, stderr=subprocess.STDOUT)\n # p.wait()\n # Hide the \"broken pipe\" message from unp\n out = subprocess.Popen([unp_bin, filename],\n cwd=filepath,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()[0]\n if os.path.exists(extracted_file):\n return extracted_file\n else:\n logger.error(\"Extraction of {} failed: {}\".format(filename, out))\n raise Exception('Archive structure error')\n logger.error(\"Could not extract {}\".format(filename))\n return filename",
"def download_extract(database_name, data_path):\r\n DATASET_ML1M = 'ml-1m'\r\n\r\n if database_name == DATASET_ML1M:\r\n url = 'http://files.grouplens.org/datasets/movielens/ml-1m.zip'\r\n hash_code = 'c4d9eecfca2ab87c1945afe126590906'\r\n extract_path = os.path.join(data_path, 'ml-1m')\r\n save_path = os.path.join(data_path, 'ml-1m.zip')\r\n extract_fn = _unzip\r\n\r\n if os.path.exists(extract_path):\r\n print('Found {} Data'.format(database_name))\r\n return\r\n\r\n if not os.path.exists(data_path):\r\n os.makedirs(data_path)\r\n\r\n if not os.path.exists(save_path):\r\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Downloading {}'.format(database_name)) as pbar:\r\n urlretrieve(\r\n url,\r\n save_path,\r\n pbar.hook)\r\n\r\n assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, '{} file is corrupted. Remove the file and try again.'.format(save_path)\r\n\r\n os.makedirs(extract_path)\r\n try:\r\n extract_fn(save_path, extract_path, database_name, data_path)\r\n except Exception as err:\r\n shutil.rmtree(extract_path) # Remove extraction folder if there is an error\r\n raise err\r\n\r\n print('Done.')\r\n # Remove compressed data\r",
"def from_directory(cls, dirpath, verbose=True):\n if dirpath[-1] == '/':\n dirpath = dirpath[:-1]\n mesh_hash = os.path.splitext(os.path.basename(dirpath))[0]\n prepath = dirpath[:dirpath.rfind(mesh_hash)]\n assert prepath[-1] == '/'\n prepath = prepath[:-1]\n split, synset = prepath.split('/')[-2:]\n ex = cls(split=split, synset_or_cat=synset,\n mesh_hash=mesh_hash, dynamic=True, verbose=verbose)\n # pylint: disable=protected-access\n ex._tx_path = f'{dirpath}/orig_to_gaps.txt'\n ex._dodeca_depth_and_normal_path = f'{dirpath}/depth_and_normals.npz'\n ex._gt_path = f'{dirpath}/mesh_orig.ply'\n ex._directory_root = dirpath\n ex._grid_path = f'{dirpath}/coarse_grid.grd'\n # pylint: enable=protected-access\n ex.precomputed_surface_samples_from_dodeca_path = (\n f'{dirpath}/surface_samples_from_dodeca.pts'\n )\n ex.is_from_directory = True\n return ex",
"def get_cpgs(url, dir) -> str:\n r = requests.get(url)\n if r.status_code == 200:\n filename = r.headers['Content-Disposition'].split(\"=\")[-1]\n with open(dir + filename, 'w+') as f:\n f.write(r.text)\n return dir + filename"
] | [
"0.6670236",
"0.5393562",
"0.5357665",
"0.5296923",
"0.5217785",
"0.5009716",
"0.49887457",
"0.48910058",
"0.48338923",
"0.47894818",
"0.47348577",
"0.46446905",
"0.4632599",
"0.46304503",
"0.4612503",
"0.4566002",
"0.45542613",
"0.4552373",
"0.45423222",
"0.4542051",
"0.45198387",
"0.4512196",
"0.45105842",
"0.4491611",
"0.44822934",
"0.4474729",
"0.44743934",
"0.4472269",
"0.44689327",
"0.4464662"
] | 0.7894865 | 0 |
RepackLGP(dir, file=None) Packs the contents of a folder into a LGP archive. Returns the resulting file. | def RepackLGP(dir, file=None):
if file is None:
p, f = GetFile(dir)
if f.endswith(("/", "\\")):
f = f[:-1]
file = var.BOOTLEG_TEMP + f + ".lgp"
subprocess.Popen([var.ULGP_LOCATION, "-c", file, "-C", dir])
return file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ExtractLGP(file, dir=None):\n\n if dir is None:\n p, f = GetFile(file)\n dir = var.BOOTLEG_TEMP + f\n subprocess.Popen([var.ULGP_LOCATION, \"-x\", file, \"-C\", dir])\n return dir",
"def pack(file_path, extension):\n package_dir = file_path.split('.')[0] + '.' + extension\n print 'package_dir', package_dir\n name = file_path.split('/')[-1]\n\n if extension in ['tar', 'tar.gz', 'tgz', 'tar.bz2']:\n # tar file\n mode = ''\n if extension in ['tar.gz', 'tgz']:\n mode = 'gz'\n elif extension in ['tar.bz2']:\n mode = 'bz2'\n with tarfile.open(name=package_dir, mode='w:%s' % mode) as tar:\n tar.add(file_path, arcname=name)\n elif extension in ['zip']:\n with zipfile.ZipFile(b, 'w') as zf:\n zf.write(file_path, arcname=name)\n else:\n pass\n\n return package_dir",
"def recursive_unpack(dir_path):\n exten = ['7z', 'zip', 'rar']\n one_more = False\n for r, d, files in os.walk(dir_path):\n packed = []\n for ext in exten:\n code_files = fnmatch.filter(files, '*.' + ext)\n if len(code_files) > 0:\n tmp_paths = [os.path.join(os.path.abspath(r), f) for f in code_files]\n packed.extend(tmp_paths)\n if not one_more and len(packed) > 0:\n one_more = True\n if len(packed) > 0:\n print(\"unpack list:\", packed)\n for p in packed:\n extract(p, os.path.dirname(p))\n os.remove(p)\n if one_more:\n recursive_unpack(dir_path)",
"def pack_file(zip_write, filename: str, suppress_error=False):\n if '\\t' in filename:\n # We want to rename the file!\n filename, arcname = filename.split('\\t')\n else:\n arcname = filename\n\n if filename[-1] == '*':\n # Pack a whole folder (blah/blah/*)\n directory = filename[:-1]\n file_count = 0\n for poss_path in RES_ROOT:\n dir_path = os.path.normpath(\n os.path.join(poss_path, directory)\n )\n if not os.path.isdir(dir_path):\n continue\n for subfile in os.listdir(dir_path):\n full_path = os.path.join(dir_path, subfile)\n rel_path = os.path.join(directory, subfile)\n zip_write(\n filename=full_path,\n arcname=rel_path,\n )\n file_count += 1\n LOGGER.info('Packed {} files from folder \"{}\"', file_count, directory)\n return\n\n for poss_path in RES_ROOT:\n full_path = os.path.normpath(\n os.path.join(poss_path, filename)\n )\n if os.path.isfile(full_path):\n zip_write(\n filename=full_path,\n arcname=arcname,\n )\n break\n else:\n if not suppress_error:\n LOGGER.warning(\n '\"bee2/' + filename + '\" not found! (May be OK if not custom)'\n )",
"def pack_contents(file, d, implementor=None):\n d = realpath(d) # avoid symlink confusion in pack_ex\n return _pack_ex(file, os.listdir(d), d, implementor)",
"def merge_directory(manager, directory, name, debug):\n set_debug_param(debug)\n\n name = name or '{}-merged.gpickle'.format(directory)\n path = os.path.join(directory, name)\n if os.path.exists(path):\n click.echo('Path already exists. Quitting. [{}]'.format(path))\n\n from . import from_directory\n from pybel import to_pickle\n\n enable_cool_mode()\n\n graph = from_directory(directory, connection=manager)\n to_pickle(graph, file=path)",
"def packDir(self, path='', recursive=True, autorotate=False, debug=False):\n\n Console.info('Packing sprites in: %s' % os.path.join(self.base, path))\n Console.indent()\n \n self.files = []\n self.addDir(path, recursive=recursive)\n Console.info('Found %d images' % len(self.files))\n\n if len(self.files) > 0:\n self.generate(path, autorotate, debug)\n \n Console.outdent()",
"def targz_folder(folder, archive_name=None):\n if not archive_name:\n archive_name = \"{}.tar.gz\".format(folder)\n with tarfile.open(archive_name, \"w:gz\") as tar:\n tar.add(folder, arcname=os.path.basename(folder))\n return archive_name",
"def unpack(self, dir, filters=None):\n if self.is_dir():\n raise GbpError(\"Cannot unpack directory %s\" % self.path)\n\n if not filters:\n filters = []\n\n if not isinstance(filters, list):\n raise GbpError(\"Filters must be a list\")\n\n self._unpack_archive(dir, filters)\n self.unpacked = self._unpacked_toplevel(dir)",
"def scan_repack_dir(file_list):\n\t# TODO: Figure out elegant way to incorporate size comparison to this without any extra loops\n\tfiles_to_repack = [ item for item in os.listdir(\"repack\") if os.path.isfile(os.path.join(\"repack\", item)) ] # Aww yiss, list comprehensions\n\tfor file in file_list:\n\t\tif file.filename in files_to_repack:\n\t\t\tfile.should_repack = True\n\t\t\tsize_difference = compare_file_size(file.filename, file.size)\n\t\t\tif size_difference:\n\t\t\t\tfile.new_size = file.size + size_difference",
"def compress_directory(directory, filename):\r\n mode = 'w:gz'\r\n name = path(directory).name\r\n with tarfile.open(filename, mode) as tar_file:\r\n tar_file.add(directory, arcname=name)",
"def gunzip_file(gz_path, new_path):\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)",
"def gunzip_file(gz_path, new_path):\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)",
"def gunzip_file(gz_path, new_path):\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)",
"def gunzip_file(gz_path, new_path):\n logging.info(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)",
"def _pack_dir(\n source_dir: str,\n exclude: Optional[List] = None,\n files_stats: Optional[Dict[str, Tuple[float, int]]] = None,\n) -> io.BytesIO:\n\n def _should_exclude(candidate: str) -> bool:\n if not exclude:\n return False\n\n for excl in exclude:\n if fnmatch.fnmatch(candidate, excl):\n return True\n return False\n\n stream = io.BytesIO()\n with tarfile.open(fileobj=stream, mode=\"w\", format=tarfile.PAX_FORMAT) as tar:\n\n if not files_stats and not exclude:\n # If no `files_stats` is passed, pack whole directory\n tar.add(source_dir, arcname=\"\", recursive=True)\n else:\n files_stats = files_stats or {}\n # Otherwise, only pack differing files\n tar.add(source_dir, arcname=\"\", recursive=False)\n for root, dirs, files in os.walk(source_dir, topdown=False):\n rel_root = os.path.relpath(root, source_dir)\n # Always add all directories\n for dir in dirs:\n key = os.path.join(rel_root, dir)\n tar.add(os.path.join(source_dir, key), arcname=key, recursive=False)\n # Add files where our information differs\n for file in files:\n key = os.path.join(rel_root, file)\n stat = os.lstat(os.path.join(source_dir, key))\n file_stat = stat.st_mtime, stat.st_size\n\n if _should_exclude(key):\n # If the file matches an exclude pattern, skip\n continue\n\n if key in files_stats and files_stats[key] == file_stat:\n # If the file did not change, skip\n continue\n\n tar.add(os.path.join(source_dir, key), arcname=key)\n\n return stream",
"def pack(filename: Union[str, Path], source_dir: Union[str, Path]) -> None:\n raise NotImplemented",
"def pack_single(file, path, implementor=None):\n path = realpath(path) # avoid symlink confusion in pack_ex\n return _pack_ex(file, [path], dirname(path), implementor)",
"def organize_folder(self, folder):\r\n\r\n # checking whether folder exist\r\n if not os.path.exists(folder):\r\n sys.exit()\r\n\r\n # splitting name and extension\r\n photo_to_keep = []\r\n for root, dirs, files in os.walk(folder):\r\n for file in files:\r\n name = file.split('.')\r\n if name[1] in ('jpg', 'JPG'):\r\n photo_to_keep.append(name[0])\r\n for root, dirs, files in os.walk(folder):\r\n for file in files:\r\n name = file.split('.')\r\n if name[1] in ('nef', 'NEF'):\r\n if name[0] in photo_to_keep:\r\n # if we are under raw folder, do nothing\r\n if os.pathname.basename != 'raw':\r\n rawfolder = '%s/raw' % root\r\n try:\r\n os.mkdir(rawfolder)\r\n except OSError:\r\n # if raw folder is already present, use it\r\n pass\r\n if os.path.exists(os.path.join(rawfolder, file)):\r\n os.remove(os.path.join(root, file))\r\n else:\r\n os.rename(os.path.join(root, file),\r\n os.path.join(rawfolder, file))\r\n else:\r\n os.remove(os.path.join(root, file))",
"def fpack (filename):\n\n try:\n\n # fits check if extension is .fits and not an LDAC fits file\n if filename.split('.')[-1] == 'fits' and '_ldac.fits' not in filename:\n header = read_hdulist(filename, get_data=False, get_header=True,\n ext_name_indices=0)\n\n # check if it is an image\n if int(header['NAXIS'])==2:\n # determine if integer or float image\n if int(header['BITPIX']) > 0:\n cmd = ['fpack', '-D', '-Y', '-v', filename]\n else:\n if 'Scorr' in filename or 'limmag' in filename:\n quant = 2\n elif 'Fpsf' in filename:\n quant = 4\n else:\n quant = 16\n\n cmd = ['fpack', '-q', str(quant), '-D', '-Y', '-v', filename]\n\n\n # if output fpacked file already exists, delete it\n filename_packed = '{}.fz'.format(filename)\n if isfile(filename_packed):\n #os.remove(filename_packed)\n remove_files([filename_packed])\n log.warning ('fpacking over already existing file {}'\n .format(filename_packed))\n\n subprocess.run(cmd)\n filename = filename_packed\n\n\n except Exception as e:\n #log.exception (traceback.format_exc())\n log.exception ('exception was raised in fpacking of image {}: {}'\n .format(filename,e))\n\n\n return filename",
"def backupToZip(folder):\n\n folder = os.path.abspath(folder) #Ensure we're using the absolute path\n number = 1\n\n while True:\n zipFilename = os.path.basename(folder) + '_' + str(number) + '.zip'\n if not os.path.exists(zipFilename):\n break\n number += 1\n\n #Create the zip file\n print('Creating %s...' % (zipFilename))\n backupZip = zipfile.ZipFile(zipFilename,'w')\n\n #Walk the directory tree and compress the files in each folder\n for foldername, subfolders, filenames in os.walk(folder):\n print('Adding files in %s...' % (foldername))\n\n #Add current folder to the zip file\n backupZip.write(foldername)\n\n #Add all files in this folder to the zip file\n for file in filenames:\n newBase = os.path.basename(folder) + '_'\n if file.startswith(newBase) and file.endswith('zip'):\n continue #Don't back up zip files\n backupZip.write(os.path.join(foldername, file))\n backupZip.close()\n print('Done.')",
"def archive_directory(dir_: str, tar_path: str):\n with tarfile.open(tar_path, 'w', encoding='utf-8') as tar:\n tar.add(dir_, arcname=os.path.sep)",
"def unpack_dir(indir, outdir, bands=None, clouds=None):\r\n archives = glob.glob(indir + '*.tar.gz')\r\n count = len(archives)\r\n for idx, archive in enumerate(archives):\r\n # Determine the outpath directory name for the unpacked landsat archive\r\n unpackDir = outdir + os.path.splitext(os.path.split(\r\n os.path.splitext(archive)[0])[1])[0]\r\n\r\n # Check if the directory already exists and make it if it doesn't\r\n if not os.path.exists(unpackDir):\r\n os.makedirs(unpackDir)\r\n\r\n # Unpack the current archive.\r\n unpack_landsat(archive, unpackDir, bands=bands,clouds=clouds)\r\n\r\n # Let the user know how progress is going.\r\n print(archive + ' unpacked (' + str(idx + 1) + ' of ' + str(count) + ')')",
"def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None",
"def zip_file(src_dir):\n zip_name = slugify(src_dir) + '.zip'\n z = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(src_dir):\n fpath = dirpath.replace(src_dir, '')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename), fpath + filename)\n z.close()",
"def _unpack_archive(self, dir, filters):\n ext = os.path.splitext(self.path)[1]\n if ext in [\".zip\", \".xpi\"]:\n if filters:\n raise GbpError(\"Can only filter tar archives: %s\", (ext, self.path))\n self._unpack_zip(dir)\n else:\n self._unpack_tar(dir, filters)",
"def _extract_all_gz_in_dir(directory):\n for file in os.listdir(directory):\n if file.endswith(\".gz\"):\n gz_fname = os.path.join(directory, file)\n _extract_gz(gz_fname, os.path.dirname(gz_fname))\n os.remove(gz_fname)",
"def zipdir(path, file_name):\n length = len(path)\n zipf = zipfile.ZipFile('output/'+f'Test_{file_name}.pptx', 'w', zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk(path):\n folder = root[length:] # path without \"parent\"\n for file in files:\n zipf.write(os.path.join(root, file), os.path.join(folder, file))\n zipf.close()\n return",
"def _pack_ex(file, names, cwd, implementor=None):\n assert isdir(cwd)\n if exists(file):\n console.rm(file)\n if not implementor: implementor = GzipTarredFile\n \n with console.cd(cwd):\n relnames = [relpath(name, cwd) for name in names]\n implementor.pack(relnames, file)\n return file",
"def file_unzipper(directory):\n debug.log(\"Unzipping directory (%s)...\"%directory)\n #FINDING AND UNZIPPING ZIPPED FILES\n for root, dirs, files in os.walk(directory, topdown=False):\n if root != \"\":\n orig_dir = os.getcwd()\n os.chdir(directory)\n Popen('gunzip -q -f *.gz > /dev/null 2>&1', shell=True).wait()\n Popen('unzip -qq -o \"*.zip\" > /dev/null 2>&1', shell=True).wait()\n Popen('rm -f *.zip > /dev/null 2>&1', shell=True).wait()\n os.chdir(orig_dir)"
] | [
"0.59195614",
"0.5775735",
"0.5496996",
"0.5278678",
"0.51489365",
"0.5020867",
"0.49108347",
"0.49014816",
"0.48940563",
"0.48889828",
"0.4886973",
"0.48255393",
"0.48208666",
"0.48069724",
"0.47919792",
"0.46869788",
"0.46493793",
"0.4645264",
"0.464379",
"0.46393862",
"0.45730013",
"0.45317292",
"0.45242932",
"0.45087698",
"0.450422",
"0.44751847",
"0.44652286",
"0.44632405",
"0.4450945",
"0.44452104"
] | 0.7751973 | 0 |
LaunchFile(file, params) Runs a raw executable file. The parameters are to feed to the process. Can be multiple parameters. Returns the process' return code. | def LaunchFile(*params):
file = subprocess.Popen(params)
file.communicate()
return file.returncode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ExecuteFile(*args): # the docstring lies about parameters\n\n folder, file = FindFile(args[0])\n params = args[1:]\n\n log.logger(\"PARS_EXEC_FILE\", format=[file, folder[:-1], params], display=False)\n process = subprocess.Popen([folder + file] + list(params))\n process.communicate()\n return process.returncode",
"def execute(file_path):\n os.startfile(file_path)",
"def run_file(file_path, globals_, script_dir=SCRIPT_DIR):\n fix_sys_path()\n script_name = os.path.basename(file_path)\n script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)\n script_path = os.path.join(script_dir, script_name)\n print script_path\n execfile(script_path, globals_)",
"def _run_launch(cls, params):\n assert cls.compose_file is not None, \"compose_file file must be set by subclass\"\n assert cls.testname is not None, \"testname file must be set by subclass\"\n\n test_path = dirname(realpath(sys.argv[0]))\n vmnet_path = dirname(vmnet.__file__) if hasattr(vmnet, '__file__') else vmnet.__path__._path[0]\n local_path = abspath(os.getenv('LOCAL_PATH', '.'))\n compose_path = '{}/compose_files/{}'.format(test_path, cls.compose_file)\n docker_dir_path = '{}/docker_dir'.format(test_path)\n launch_path = '{}/launch.py'.format(vmnet_path)\n if not hasattr(cls, 'docker_dir'): cls.docker_dir = docker_dir_path\n cls.launch_path = launch_path\n\n exc_str = 'python {} --compose_file {} --docker_dir {} --local_path {} {}'.format(\n launch_path,\n cls.compose_file if exists(cls.compose_file) else compose_path,\n cls.docker_dir if exists(cls.docker_dir) else docker_dir_path,\n cls.local_path if hasattr(cls, 'local_path') else local_path,\n params\n )\n os.system(exc_str)",
"def run_execute_file(file_path, globals=None, locals=None):\n if globals is None:\n globals = {}\n globals.update({\n \"__file__\": file_path,\n \"__name__\": \"__main__\",\n })\n with open(file_path, 'rb') as file:\n exec(compile(file.read(), file_path, 'exec'), globals, locals)",
"def ExecuteFile(python_program, main_filename, args, env, module_space,\n coverage_entrypoint, workspace):\n # type: (str, str, list[str], dict[str, str], str, str|None, str|None) -> ...\n # We want to use os.execv instead of subprocess.call, which causes\n # problems with signal passing (making it difficult to kill\n # Bazel). However, these conditions force us to run via\n # subprocess.call instead:\n #\n # - On Windows, os.execv doesn't handle arguments with spaces\n # correctly, and it actually starts a subprocess just like\n # subprocess.call.\n # - When running in a workspace (i.e., if we're running from a zip),\n # we need to clean up the workspace after the process finishes so\n # control must return here.\n # - If we may need to emit a host config warning after execution, we\n # can't execv because we need control to return here. This only\n # happens for targets built in the host config.\n # - For coverage targets, at least coveragepy requires running in\n # two invocations, which also requires control to return here.\n #\n if not (IsWindows() or workspace or coverage_entrypoint):\n _RunExecv(python_program, main_filename, args, env)\n\n if coverage_entrypoint is not None:\n ret_code = _RunForCoverage(python_program, main_filename, args, env,\n coverage_entrypoint, workspace)\n else:\n ret_code = subprocess.call(\n [python_program, main_filename] + args,\n env=env,\n cwd=workspace\n )\n\n if workspace:\n shutil.rmtree(os.path.dirname(module_space), True)\n sys.exit(ret_code)",
"def run_scratch_file(file_name, settings):\n return subprocess.call([file_name])",
"def do_exec(self, arg):\n self.run_file(arg['path'])",
"def run_file(self, user_input):\n # Extract the important information\n self.path, self.name = self.extractor.extract_program_information(user_input)\n\n # Determine what language the program is\n program_type = self.determine_program_type(path, name)\n\n # If the file is python, run it the specific way\n # @TODO: Make it work without shell=True\n if program_type == \"python\":\n subprocess.Popen(\"python \" + self.path + self.name, shell=True)",
"def run(self, filePath = None):\n\n\t\t\tfileName = self._getFilePath(filePath = filePath)\n\t\t\ttry:\n\t\t\t\tos.startfile(fileName)\n\t\t\texcept AttributeError:\n\t\t\t\tsubprocess.call(['open', fileName])",
"def _run_file(file_path, globals_):\n script_name = os.path.basename(file_path)\n\n sys.path = (_PATHS.script_paths(script_name) +\n _PATHS.scrub_path(script_name, sys.path))\n\n fix_google_path()\n\n execfile(_PATHS.script_file(script_name), globals_)",
"def execfile_(filepath: str, _globals: Any) -> None:\n with open(filepath, 'rb') as stream:\n source = stream.read()\n\n code = compile(source, filepath, 'exec')\n exec(code, _globals)",
"def runFile(self, f, name=None):\n return self.run(f.read(), name)",
"def run_file(filepath: str, strict: bool = False, debug: bool = False):\n _, extension = os.path.splitext(filepath)\n try:\n return _ext_fn_map[extension](filepath, strict=strict, debug=debug)\n except KeyError:\n raise ValueError(\"There was an error running the file (invalid file extension).\")",
"def exec_from_inputfile(args):\n args.path = os.path.abspath(args.path)\n if not check(args.path, 'e'):\n clean_up(args.debug, args.folder, args.action, 1)\n\n logger.info(\"You are using the inputfile. All parameters other than folder, API key and debug will be ignored\")\n try:\n startargs = readconfig(args.path)\n makeconfig(*startargs[:13], date=args.today, folder=args.folder)\n\n r = Run('n', args.folder, args.debug)\n r.start()\n\n except TypeError:\n logger.critical(\"Wrong data format. Check the documentation\")\n clean_up(args.debug, args.folder, args.action, 1)",
"def run_executable(args, input_file) -> dict:\n try:\n Path(args.exe).resolve(strict=True)\n except FileNotFoundError as error:\n raise error\n\n os.environ[\"OMP_NUM_THREADS\"] = str(args.omp_num_threads)\n run_command = ['./' + args.exe, input_file]\n\n if 'mpi' or 'hybrid' in args.build_type:\n run_command = ['mpirun', '-np', str(args.np)] + run_command\n\n process = subprocess.run(run_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if process.returncode != 0:\n warnings.warn('process returned to stderr: ' + \" \".join(run_command))\n\n return {'stdout': process.stdout.decode(\"utf-8\").split('\\n'),\n 'stderr': process.stderr.decode(\"utf-8\").split('\\n'),\n 'returncode': process.returncode}",
"def _run(self, scenario_file, scenario_name, extra=None):\n merged = copy.deepcopy(self._default_extra)\n merged.update(extra or {})\n scenario_text = self._dict_to_single_line_yaml(data=merged)\n args = [\n self._simulator,\n f\"--scenario_file={scenario_file}\",\n f\"--scenario_name={scenario_name}\",\n f\"--scenario_text={scenario_text}\",\n ]\n printable_args = \" \".join([\n shlex.quote(re.sub(r\"[^=]*\\.runfiles/\", \"\", x))\n for x in args\n ])\n print(f\"== Running {printable_args}\", file=sys.stderr, flush=True)\n subprocess.run(args, check=True)",
"def execute_file (self, program):\n with open (program, \"r\") as stream:\n self.execute (stream.read ())\n return self.context",
"def run_file(self, fpath):\n with open(fpath, \"r\", encoding=\"utf-8\") as fin:\n return self.run_commands(fin.read())",
"def test_nsIFile_launch():\n\n assert _do_test_raw('foo.launch()').failed()",
"def do_file(self, filename):\n with open(filename, \"r\") as infile:\n self._run_cmd(infile.read())",
"def magic_run(self, parameter_s =''):\n\n # get arguments and set sys.argv for program to be run.\n opts,arg_lst = self.parse_options(parameter_s,'nipd:l:rs:t:',\n mode='list',list_all=1)\n\n try:\n filename = get_py_filename(arg_lst[0])\n except IndexError:\n warn('you must provide at least a filename.')\n print '\\n@run:\\n',inspect.getdoc(self.magic_run)\n return\n except IOError,msg:\n warn(msg)\n return\n\n save_argv = sys.argv # save it for later restoring\n # perform shell-like expansions on the argument list before passing it\n # to programs\n xvars = os.path.expandvars\n xuser = os.path.expanduser\n xpand = lambda s: xvars(xuser(s))\n sys.argv = [xpand(arg) for arg in arg_lst] \n\n if opts.has_key('i'):\n prog_ns = self.shell.user_ns\n else:\n name = opts.has_key('n') and __name__ or '__main__'\n prog_ns = {'__name__':name}\n\n stats = None\n try:\n if opts.has_key('p'):\n cmd = parameter_s.split()[:-1]\n stats = self.magic_prun('',0,opts,arg_lst,prog_ns)\n else:\n self.shell.safe_execfile(filename,prog_ns,prog_ns)\n if not opts.has_key('i'):\n # update IPython interactive namespace\n self.user_ns.update(prog_ns)\n finally:\n sys.argv = save_argv\n return stats",
"def open_file(path):\n if platform.system() == \"Windows\":\n os.startfile(path)\n elif platform.system() == \"Darwin\":\n subprocess.Popen([\"open\", path])\n else:\n subprocess.Popen([\"xdg-open\", path])",
"def runScript(path=None):\n if path:\n exec(compile(open(path, \"rb\").read(), path, 'exec'))",
"def execute_file(filename):\n try:\n f = open(filename, 'r')\n except IOError, err:\n print \"Could not open file\", filename\n return\n blotish._set_interactive(False)\n exit_flag = False\n for line in f:\n line = line.rstrip()\n exit_flag = execute(line)\n if exit_flag: break\n if interpreter._error_flag: break\n blotish._set_interactive(True)\n return exit_flag",
"def run(\n path,\n host,\n params={}\n ):\n\n logging.info(\"Running '%s' in '%s'...\", path, host)\n client = kfp.Client(f\"{host}\")\n try:\n result = client.create_run_from_pipeline_package(\n pipeline_file=path,\n arguments=params\n )\n logging.info(\"View run: %s/#/runs/details/%s\",\n host,\n result.run_id)\n except Exception as ex:\n logging.error(\"Failed to run '{%s}' with error:\\n{%s}\", path, ex)\n sys.exit(1)",
"def shell_script(scriptfile, args=None):\n # retrieve executable for this script.\n try:\n cmdargs = EXECS[scriptfile[-3:]]\n except Exception as ex:\n print_fail('Cannot locate executable for this script: '\n '{}'.format(scriptfile),\n exc=ex)\n # build command list for subprocess\n cmdlst = list(cmdargs)\n cmdlst.append(scriptfile)\n if args:\n cmdlst.extend(args)\n\n # run/create process\n print('\\nRunning: {}\\n'.format(' '.join(cmdlst)))\n # Shell the process, returns the subprocess.Popen() proc object.\n proc = run_process(cmdlst)\n # Print stdout/stderr\n retcode = print_proc_output(proc)\n\n return retcode",
"def run(filename):\n try:\n with open(filename) as f:\n interp.runcode(f.read())\n except IOError as e:\n self.perror(e)",
"def open_program(path):\r\n os.startfile(path)",
"def launch(file, machine):\n fp = open(file, 'r')\n timeout = 0\n if machine.mpdclean != None :\n process = subprocess.Popen(machine.mpdclean, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = process.communicate()[0]\n\n if machine.mpdboot != None:\n process = subprocess.Popen(machine.mpdboot, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = process.communicate()[0]\n print output\n while(True):\n line = fp.readline()\n if line == \"\": break\n while (True):\n p = subprocess.Popen(line, shell=True, \n stdout=subprocess.PIPE, \n stderr=subprocess.STDOUT)\n t_beginning = time.time()\n seconds_passed = 0\n while True:\n if p.poll() is not None:\n break\n seconds_passed = time.time() - t_beginning\n if timeout != 0 and seconds_passed > timeout:\n p.kill()\n break\n time.sleep(0.1)\n\n output = p.communicate()[0]\n retcode = p.poll()\n #print u'%s : %d' % (line, retcode)\n if machine.qstat == None or retcode == 0 : break\n time.sleep(machine.sleeptime)\n\n fp.close()"
] | [
"0.7223708",
"0.68104553",
"0.633585",
"0.6269134",
"0.6211025",
"0.6098034",
"0.60761744",
"0.5994176",
"0.5913522",
"0.5878413",
"0.5876325",
"0.5871793",
"0.567408",
"0.566498",
"0.5593275",
"0.557758",
"0.5562519",
"0.5535439",
"0.5529694",
"0.5520267",
"0.5504367",
"0.5498012",
"0.5497385",
"0.5458579",
"0.5451359",
"0.54475874",
"0.54469204",
"0.54418457",
"0.5426732",
"0.5424894"
] | 0.81206 | 0 |
CopyFolder(src, dst, overwrite=True) Copies the content of 'src' into 'dst'. The destination may or may not exist. The 'overwrite' parameter will tell the function whether to overwrite files. This supports nested folders. Always returns 0. | def CopyFolder(src, dst, overwrite=True):
if not src.endswith(("/", "\\")):
src = src + "\\"
if not dst.endswith(("/", "\\")):
dst = dst + "\\"
os.makedirs(dst, exist_ok=True)
for file in os.listdir(src):
if not overwrite and os.path.isfile(dst + file):
continue
if os.path.isfile(src + file):
shutil.copy(src + file, dst + file)
elif os.path.isdir(src + file):
CopyFolder(src + file, dst + file, overwrite)
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_folder(src: str, dest: str) -> None:\n\tuux.show_info(\"Copying folder \" + src + \" => \" + dest)\n\n\tif not os.path.exists(src):\n\t\tuux.show_error(\"Unable to copy, '\" + src + \"' does not exist.\")\n\t\treturn\n\n\tmkdir(dest)\n\n\tfor fn in os.listdir(src):\n\t\tif os.path.isfile(src + fn):\n\t\t\ttry:\n\t\t\t\tcopy_file(src + fn, dest)\n\t\t\texcept IOError as ex:\n\t\t\t\tuux.show_error(\"Failed to copy file, \" + os.strerror(ex.errno))",
"def _copy_dir(src, dst):\n if os.path.isdir(src):\n os.makedirs(dst, exist_ok=True)\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n\n if os.path.isdir(s):\n _copy_dir(s, d)\n else:\n shutil.copy2(s, d)\n\n else:\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n _delete_file(dst)\n shutil.copy2(src, dst)",
"def copy_folder(source, destination):\n\n try:\n shutil.copytree(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True",
"def copy_dir(src: Text, dst: Text) -> None:\n\n if tf.io.gfile.exists(dst):\n tf.io.gfile.rmtree(dst)\n tf.io.gfile.makedirs(dst)\n\n for dir_name, sub_dirs, leaf_files in tf.io.gfile.walk(src):\n for leaf_file in leaf_files:\n leaf_file_path = os.path.join(dir_name, leaf_file)\n new_file_path = os.path.join(dir_name.replace(src, dst, 1), leaf_file)\n tf.io.gfile.copy(leaf_file_path, new_file_path)\n\n for sub_dir in sub_dirs:\n tf.io.gfile.makedirs(os.path.join(dir_name.replace(src, dst, 1), sub_dir))",
"def copydir(src, dst):\n for item in os.listdir(src):\n s, d = os.path.join(src, item), os.path.join(dst, item)\n if os.path.isdir(s):\n if not os.path.isdir(d):\n os.mkdir(d)\n copydir(s, d)\n else:\n shutil.copy(s, d)",
"def copy_dir(src, dst):\n try:\n debug.log(\"copy dir from \"+ src, \"to \"+ dst)\n shutil.copytree(src, dst)\n except Exception as e:\n debug.log(\"Error: happened while copying!\\n%s\\n\"%e)",
"def copy_file_to_multiple_subfolders(src, dst, *args, **kwargs):\n print '\\nSource: {}\\nDestinations parent folder: {}'.format(src, dst)\n filename = os.path.basename(src)\n for folder in (d for d in os.listdir(dst) if os.path.isdir(d)):\n print '\\nCopying {} to {}...'.format(filename, folder)\n try:\n shutil.copy(src, os.path.abspath(dst) + '\\\\' + folder)\n except Exception as e:\n print e",
"def copy(src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise",
"def copy(src, dst):\n try:\n shutil.copytree(src, dst)\n except OSError as exc:\n if exc.errno == errno.ENOTDIR:\n shutil.copy(src, dst)\n else:\n raise",
"def copy(src, dst):\n os.makedirs(os.path.dirname(dst), exist_ok=True)\n shutil.copy2(src, dst)",
"def copydir(self, destination, **kwargs):\n assert _os.path.isdir(self.__str__()) == True\n _shutil.copy(self.__str__(), destination, **kwargs)",
"def copy_one(self, src, dest):\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n if src.is_dir():\n shutil.copytree(src, dest)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)",
"def copytree(src, dst, overwrite=False, changed_only=True):\n assert os.path.isdir(src), \\\n (\"Source path `%s` does not name an existing directory\" % src)\n errors = []\n if not os.path.exists(dst):\n os.makedirs(dst)\n for name in os.listdir(src):\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n try:\n if os.path.isdir(srcname):\n errors.extend(\n copytree(srcname, dstname, overwrite, changed_only))\n else:\n copyfile(srcname, dstname)\n except (IOError, os.error) as why:\n errors.append((srcname, dstname, why))\n return errors",
"def copy_recursively(src, dst, overwrite=False, changed_only=True):\n if os.path.isdir(src):\n copytree(src, dst, overwrite, changed_only)\n else:\n copyfile(src, dst, overwrite, changed_only)",
"def copyAsset(self, src, dst, **kw):\n if self.isfile(src):\n self.copyfile(src, dst)\n else:\n # copy folder\n if not self.exists(dst):\n self.makedirs(dst)\n for name in self.listdir(src):\n self.copyAsset(self.joinpath(src, name), self.joinpath(dst, name), copycache=0)\n\n # copy cache\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return\n\n cache_dst = self.cache_path(dst)\n cache_dst_parent = os.path.dirname(cache_dst)\n if not os.path.exists( cache_dst_parent ):\n os.makedirs(cache_dst_parent )\n if not os.path.exists(cache_dst):\n ucopytree(cache_src, cache_dst)",
"def copy(self, src, dest, recursive=False, update=False):\n self.makedir(posixpath.dirname(dest))\n command = CommandBuilder.copy(src, dest, recursive, update)\n return self.execute_command(command)",
"def copyanything(src, dst):\n try:\n copytree(src, dst, dirs_exist_ok=True)\n except FileExistsError as e: # noqa\n pass\n except OSError as err:\n # TODO(dittrich): This causes a pylint error\n # Not sure what test cases would trigger this, or best fix.\n if err.errno == os.errno.ENOTDIR: # type: ignore\n copy(src, dst)\n else:\n raise\n finally:\n remove_other_perms(dst)",
"def copydir(source, dest, ignore=None):\n shutil.copytree(source, dest, ignore_dangling_symlinks=True,\n ignore=shutil.ignore_patterns(*ignore) if ignore else None)",
"def copy(self, dest, overwrite=False):\n dest = normpath(dest)\n try:\n remote = get_remote(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n remote = None\n else: # Something exists here.\n if not overwrite:\n raise ValueError(\"Something exists at %s\" % remote.uri)\n try:\n if self.hash == remote.hash: # Nothing to update.\n pdbox.info(\n \"%s and %s are identical\" % (self.uri, remote.uri),\n )\n return\n except AttributeError: # RemoteFolder doesn't have a hash.\n pass\n\n if not pdbox._args.get(\"dryrun\"):\n if overwrite and remote:\n # There's no way to copy and overwrite at the same time,\n # so delete the existing file first.\n remote.delete()\n\n result = execute(pdbox.dbx.files_copy_v2, self.path, dest)\n pdbox.debug(\"Metadata respones: %s\" % result.metadata)\n\n pdbox.info(\"Copied %s to %s\" % (self.uri, dbx_uri(dest)))\n if not pdbox._args.get(\"dryrun\"): # Return the newly created object.\n return get_remote(None, meta=result.metadata)",
"def copy(self, destination):\n destination = Path(destination)\n src_base = str(self.directory)\n if self.flatten:\n dst_base = destination\n else:\n dst_base = Path(destination.joinpath(self.directory.stem))\n\n for src in self.locations_to_copy:\n if src.is_dir():\n for dir_path, dir_names, file_names in os.walk(str(src)):\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(dir_path.replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n for file in file_names:\n shutil.copy2(os.path.join(dir_path, file), str(dst_dir))\n else:\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(str(src.parent).replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n shutil.copy2(str(src), str(dst_dir))",
"def copy_file(src_file,dst_folder):\n from shutil import copyfile\n from os.path import split\n copyfile(src_file, dst_folder+split(src_file)[1])\n return",
"def copy(src_path, dst_path, src_fs=None, dst_fs=None, overwrite=False):\n src_path = MPath.from_inp(src_path, fs=src_fs)\n dst_path = MPath.from_inp(dst_path, fs=dst_fs)\n\n if not overwrite and dst_path.fs.exists(dst_path):\n raise IOError(f\"{dst_path} already exists\")\n\n # create parent directories on local filesystems\n dst_path.parent.makedirs()\n\n # copy either within a filesystem or between filesystems\n if src_path.fs == dst_path.fs:\n src_path.fs.copy(str(src_path), str(dst_path))\n else:\n # read source data first\n with src_path.open(\"rb\") as src:\n content = src.read()\n # only write to destination if reading source data didn't raise errors,\n # otherwise we can end up with empty objects on an object store\n with dst_path.open(\"wb\") as dst:\n dst.write(content)",
"def copy(source, destination):\n if os.path.isdir(source):\n return __copytree(source, destination)\n else:\n return __copyfile2(source, destination)",
"def copyDir(srcPath, destPath):\n shutil.copytree(srcPath, destPath)",
"def copy(source, destination):\r\n\r\n source_ = os.path.abspath(os.path.expanduser(source))\r\n destination_ = os.path.abspath(os.path.expanduser(destination))\r\n\r\n if not os.path.exists(destination_) and not os.path.isfile(source_):\r\n os.makedirs(destination_)\r\n\r\n def recurse(source, destination):\r\n for entry in os.listdir(source):\r\n entry_path = os.path.join(source, entry)\r\n if os.path.isdir(entry_path):\r\n entry_dest = os.path.join(destination, entry)\r\n if os.path.exists(entry_dest):\r\n if not os.path.isdir(entry_dest):\r\n raise IOError('Failed to copy {0} a directory.'\r\n .format(entry_dest))\r\n recurse(entry_path, entry_dest)\r\n else:\r\n shutil.copytree(entry_path, entry_dest)\r\n else:\r\n shutil.copy2(entry_path, destination)\r\n\r\n\r\n if os.path.isdir(source_):\r\n recurse(source_, destination_)\r\n\r\n elif os.path.isfile(source_):\r\n dest_dir = os.path.dirname(destination_)\r\n if not os.path.exists(dest_dir):\r\n os.makedirs(dest_dir)\r\n shutil.copy2(source_, destination_)\r\n logger.info('copying %s to %s' % (source_, destination_))\r\n else:\r\n logger.warning('skipped copy %s to %s' % (source_, destination_))",
"def copyDir(self, src, subpath):\n dst = self.output_path + \"/\" + subpath\n shutil.copytree(src, dst)",
"def oh_folders(src, dest=dest):\n copytree(src, dest, ignore=ignore_patterns(*ignore_list), dirs_exist_ok=True)",
"async def _copy_folder_files(self, src_dir, dest_dir):\n for dir_item in os.listdir(src_dir):\n src_path = os.path.join(src_dir, dir_item)\n if os.path.isfile(src_path):\n await self._copy_file_with_hook(dir_item, src_path, os.path.join(dest_dir, dir_item))",
"def copy(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest"
] | [
"0.75916386",
"0.6912031",
"0.6744624",
"0.6506837",
"0.64595366",
"0.64063305",
"0.62938625",
"0.6196626",
"0.6196626",
"0.61707675",
"0.6094042",
"0.6064407",
"0.60359025",
"0.6029565",
"0.6015132",
"0.60149294",
"0.5964577",
"0.59552974",
"0.59481025",
"0.5929881",
"0.59262204",
"0.5902163",
"0.5888877",
"0.58877414",
"0.58697265",
"0.5868425",
"0.5849449",
"0.58429515",
"0.5829567",
"0.5825277"
] | 0.80914754 | 0 |
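Usage sketch for the CopyFolder row above (illustrative only, not part of the dataset record): it assumes the CopyFolder definition from that row is pasted into the same script and, since the helper joins paths with backslashes, that it runs on Windows; the file and directory names are invented.

import os
import tempfile

# Build a small nested source tree inside a throwaway sandbox directory.
root = tempfile.mkdtemp()
src = os.path.join(root, "src")
dst = os.path.join(root, "dst")
os.makedirs(os.path.join(src, "nested"))
with open(os.path.join(src, "a.txt"), "w") as f:
    f.write("first version")
with open(os.path.join(src, "nested", "b.txt"), "w") as f:
    f.write("nested file")

CopyFolder(src, dst)                   # copies a.txt and nested\b.txt into dst
CopyFolder(src, dst, overwrite=False)  # a second pass skips files that already exist
print(sorted(os.listdir(dst)))         # ['a.txt', 'nested']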
CopyFile(path, file, new) Creates a copy of 'file' with name 'new' in 'path'. Always returns 0. | def CopyFile(path, file, new):
if not path.endswith(("/", "\\")):
path = path + "\\"
shutil.copy(path + file, path + new)
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_file(file_name, new_file_name):\n\n import os\n\n if not os.path.exists(file_name):\n raise FileNotFoundError\n\n with open(str(file_name), 'rb') as infile:\n with open(str(new_file_name), 'wb') as outfile:\n while True:\n buff = infile.read(10240)\n if buff:\n outfile.write(buff)\n else:\n break\n\n return",
"def _copy_file ( self, source, dest ):\n return",
"def copyFile(srcPath, destPath):\n shutil.copy(srcPath, destPath)",
"def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1",
"def copyfile(source, dest, newname=None):\n\n if not os.path.exists(source):\n #print 'no such file %s' %source\n return False\n shutil.copy(source, newname)\n dest = os.path.join(dest, newname)\n if os.path.exists(dest):\n os.remove(dest)\n shutil.move(newname, dest)\n return True",
"def copy_file(file: str, dest: str) -> None:\n\tuux.show_debug(\"Copying \" + str(file) + \" => \" + str(dest))\n\tshutil.copy2(file, dest)",
"def copy_file(source_file_name, dest_file_name):\n print(\"Copying \" + source_file_name + \" to \" + dest_file_name)\n shutil.copy2(source_file_name, dest_file_name)\n print(\"Copying done.\")",
"def file_copy(self, path, dest, override=False):\n # first check if path exists on the file system\n if self.exists(dest) and not override:\n return\n if j.sal.fs.exists(path):\n return self.file_copy_from_local(path, dest)\n else:\n return self.file_copy_form_bcdbfs(path, dest)",
"def copy_file(file, destination):\n with open(file, 'rb') as infile, open(destination, 'wb') as outfile:\n outfile.write(infile.read())",
"def copyFile(source, target):\n\tfrom shutil import copyfile, copystat, copymode\n\tfrom os.path import split\n\tsource = adaptPath(source)\n\ttarget = adaptPath(target)\n\tif int(getFileModifTime(source)) != int(getFileModifTime(target)):\n\t\tmakedir(split(target)[0])\n\t\tcopyfile(source, target)\n\t\tcopystat(source, target)\n\t\tcopymode(source, target)\n\t#~ else:\n\t\t#~ print (\"%s not copied\"%(target))",
"def copyFile(src, dest):\n try:\n shutil.copy(src,dest)\n except shutil.Error as e:\n print(\"Error: \" + str(e))\n except IOError as e:\n print(\"Error: \" + e.strerror)",
"def copyFile(source,destination):\r\n logging.info(\"source\",source)\r\n logging.info(\"destination\",destination)\r\n try:\r\n shutil.copy(source, destination)\r\n logging.info(\"File copied successfully.\")\r\n \"\"\"If source and destination are same\"\"\"\r\n except shutil.SameFileError:\r\n logging.info(\"File not copied sucessfuly.\")\r\n \"\"\"List files and directories\"\"\"\r\n logging.info(\"After copying file:\")\r\n logging.info(os.listdir(destination))\r\n \"\"\"logging.info path of newly\r\n created file\"\"\"\r\n logging.info(\"Destination path:\", destination)",
"def act_copy_file(self, file_source, file_target):\n try:\n path = os.path.dirname(file_target)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy2(file_source, file_target)\n self.logger.debug('%s: Action: <copy> %s -> %s', self.name, file_source, file_target)\n except:\n self.logger.exception('Error on file copy: %s -> %s', file_source, file_target)",
"def file_copy(self, from_path, to_path):\n params = {'root': self.session.root,\n 'from_path': format_path(from_path),\n 'to_path': format_path(to_path),\n }\n\n url, params, headers = self.request(\"/fileops/copy\", params)\n\n return self.rest_client.POST(url, params, headers)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)",
"def copy_file(source_file, target_file):\n\t# print('\\n\\nCopying [{}] to [{}].\\n\\n'.format(source_file, target_file))\n\trun_rsync([source_file, target_file])",
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def copy_file(name, n_name):\n\n if os.path.isfile(config_tools.full_dest+name):\n try:\n shutil.copyfile(config_tools.full_dest+name, config_tools.full_dest+n_name)\n except OSError:\n print(f\"Не возможно копировать файл {name}\")\n else:\n print(f\"Файл {config_tools.full_dest+name} скопирован как {config_tools.full_dest+n_name}\")",
"def __copyfile(source, destination):\n logger.info(\"copyfile: %s -> %s\" % (source, destination))\n try:\n __create_destdir(destination)\n shutil.copy(source, destination)\n return True\n except Exception as e:\n logger.error(\n \"copyfile: %s -> %s failed! Error: %s\", source, destination, e\n )\n return False",
"def copy_file(fs, inpath, outpath):\n fs.copy(inpath, outpath)",
"def copy_file(source, destination):\n\n try:\n shutil.copy(source, destination)\n except (OSError, IOError):\n return False\n else:\n return True",
"def copy_file(src_file,dst_folder):\n from shutil import copyfile\n from os.path import split\n copyfile(src_file, dst_folder+split(src_file)[1])\n return",
"def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)",
"def copyfile(self, destination, **kwargs):\n assert _os.path.isfile(self.__str__()) == True\n _shutil.copyfile(self.__str__(), destination, **kwargs)",
"def CopyFileTo(self, filename): # real signature unknown; restored from __doc__\n pass",
"def copyFile( src, dest ):\n\tinFile = open( src, 'r' )\n\toutFile = open( dest, 'w' )\n\tfor line in inFile:\n\t\toutFile.write( line )\n\toutFile.close()\n\tinFile.close()",
"def copyFile(filename, sourceDir, targetDir, renameTo=None, silent=True):\n\tif renameTo == None: renameTo = filename\n\tfullname_source = os.path.join(sourceDir, filename)\n\tfullname_target = os.path.join(targetDir, renameTo)\n\tshutil.copy(fullname_source, fullname_target)\n\tif silent==False:\n\t\tprint(\"File \"+fullname_source+\" copied to \"+source_dir)",
"def cp_to_file(fn0, fn):\n\n # keep rewriting attributes\n shutil.copyfile(fn0, fn)"
] | [
"0.7342367",
"0.7062944",
"0.6988448",
"0.6937015",
"0.6923848",
"0.686875",
"0.67582273",
"0.6685474",
"0.66165555",
"0.6615056",
"0.6586806",
"0.65143555",
"0.6512323",
"0.646609",
"0.6458299",
"0.6458299",
"0.6458299",
"0.64416116",
"0.64161277",
"0.6405297",
"0.6394165",
"0.6374756",
"0.6372155",
"0.6344616",
"0.6321892",
"0.6300394",
"0.62297016",
"0.6229411",
"0.6229139",
"0.6216263"
] | 0.82788193 | 0 |
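Usage sketch for the CopyFile row above (illustrative only, not part of the dataset record), again assuming the CopyFile definition is in scope and the script runs on Windows; the file names are invented.

import os
import tempfile

# CopyFile duplicates one file inside the same directory under a new name.
folder = tempfile.mkdtemp()
with open(os.path.join(folder, "settings.ini"), "w") as f:
    f.write("[general]\nvolume=5\n")

result = CopyFile(folder, "settings.ini", "settings.backup.ini")
print(result)                       # 0 - the function always returns 0
print(sorted(os.listdir(folder)))   # ['settings.backup.ini', 'settings.ini']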
DeleteFile(*path) Deletes all files and folders given. Always returns 0. | def DeleteFile(*path):
for line in path:
if os.path.isdir(line):
shutil.rmtree(line)
if os.path.isfile(line):
os.remove(line)
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_file(path):\n return files.delete_file(path)",
"def file_delete(self, path):\n params = {'root': self.session.root, 'path': format_path(path)}\n\n url, params, headers = self.request(\"/fileops/delete\", params)\n\n return self.rest_client.POST(url, params, headers)",
"def delete_file(self, path):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))",
"def delete_files_from_folder(path: str) -> None:\n for the_file in os.listdir(path):\n file_path = os.path.join(path, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)",
"def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def delete(path, recursive=False):\n fs.delete(path, recursive)",
"def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)",
"def deleteDir(self, path):\n\n # remove directory even if it has files\n shutil.rmtree(path, ignore_errors=True)",
"def del_files_from_disk(path):\n\n shutil.rmtree(path) #,ignore_errors=True)",
"def rm(self, path):\n try:\n basedir, item = os.path.split(path)\n postdata = codecs.encode(json.dumps({ 'baseDir': basedir, 'items': [ item ] }), 'utf-8')\n self._urlopen('/api/fileops/delete', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to delete '{}'\".format(path))",
"def file_delete(file_name:str, folder_path: List[str]): \n drive = _drive_gen()\n deleted = False\n folder_id, files = _list_file(folder_path, drive)\n for file in files:\n if file['title'] == file_name:\n file.Trash()\n deleted = True\n if not deleted:\n raise FileNotFoundError(f\"file {file_name} is not found under /{'/'.join(folder_path)}\")",
"def delete_file(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n os.remove(path)",
"def RemoveFile(*path):\n file_path = os.path.join(*path)\n try:\n os.remove(file_path)\n except OSError, e:\n if e.errno != errno.ENOENT:\n raise",
"def delete_folder(path):\n command = ['rm', '-rf', TEST_DIR]\n file_operation(path, command)",
"def remove(path):\n if os.path.isfile(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n raise ValueError(\"file {} is not a file or dir.\".format(path))",
"def remove(path):\n if os.path.isfile(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n raise ValueError(\"file {} is not a file or dir.\".format(path))",
"def delete(self, path):\n path = path.strip(\"/\")\n if not path:\n raise HTTPError(400, \"Can't delete root\")\n self.delete_file(path)\n self.checkpoints.delete_all_checkpoints(path)",
"def _delete_file(self, path):\n if not self.mount():\n return False\n uri = self.path_to_uri(path)\n return self.gvfs.delete_file(uri)",
"def delete(self, path):\n full_path = self._get_full_path(path)\n if os.path.exists(full_path):\n os.remove(full_path)",
"def remove(path):\n if os.path.isfile(path):\n os.remove(path) # remove the file\n elif os.path.isdir(path):\n shutil.rmtree(path) # remove dir and all contains\n else:\n print(\" - file {} is not a file or dir.\".format(path))",
"def delete_folder(path: str) -> None:\n\tuux.show_info(\"Deleting \" + path)\n\n\tif not os.path.exists(path):\n\t\t# Path does not exist\n\t\treturn\n\n\ttry:\n\t\tshutil.rmtree(path, True)\n\texcept OSError as ex:\n\t\tuux.show_warning(\"Failed to delete directory, \" + os.strerror(ex.errno))",
"def rm(path):\n abs_path = navigate.get_abs_path(path)\n parent, name = navigate.split_path(abs_path)\n access_token = db.get_access_to_file(parent, name)\n if access_token is not None:\n dbox_path = '/' + name\n client = dropbox.client.DropboxClient(access_token)\n client.file_delete(dbox_path)\n db.remove_file(access_token, parent, name)",
"def delete_file(self, path):\n raise HTTPError(\n 501,\n \"Narrative deletion not implemented here. Deletion is handled elsewhere.\",\n )",
"def delete_file(file: str) -> None:\n\tuux.show_info(\"Deleting \" + file)\n\n\tif not os.path.exists(file):\n\t\t# Files does not exist\n\t\treturn\n\n\tos.remove(file)",
"def rm(path):\n try:\n shutil.rmtree(path)\n except Exception as e:\n print(\"* [Error] occured: {}\\n\".format(e))\n else:\n print(\"* Done.\\n\")",
"def delete_all(path, delete_root_dir=False):\n if not os.path.isdir(path):\n return\n files=os.listdir(path)\n for x in files:\n fullpath=os.path.join(path, x)\n if os.path.isfile(fullpath):\n os.remove(fullpath)\n elif os.path.isdir(fullpath):\n delete_all(fullpath, True)\n if delete_root_dir:\n os.rmdir(path)",
"def remove_file(path: str) -> None:\n\tremove(path)"
] | [
"0.7869286",
"0.76546806",
"0.7388755",
"0.72415155",
"0.72076136",
"0.72005427",
"0.7192144",
"0.7170887",
"0.7170887",
"0.7164125",
"0.70655143",
"0.69983995",
"0.6865429",
"0.68378466",
"0.67944336",
"0.6735856",
"0.6728706",
"0.67129046",
"0.67129046",
"0.67070305",
"0.6676376",
"0.66520804",
"0.6644944",
"0.66166466",
"0.66005707",
"0.65586305",
"0.65510523",
"0.6541166",
"0.6489235",
"0.6444245"
] | 0.80297625 | 0 |
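Usage sketch for the DeleteFile row above (illustrative only, not part of the dataset record), assuming the DeleteFile definition is in scope; the paths are invented. The point it shows is that the function accepts any number of paths and handles files and folders alike.

import os
import shutil
import tempfile

sandbox = tempfile.mkdtemp()
stray_file = os.path.join(sandbox, "stray.log")
stray_dir = os.path.join(sandbox, "cache")
open(stray_file, "w").close()
os.makedirs(stray_dir)

DeleteFile(stray_file, stray_dir)   # the file is unlinked, the folder removed recursively
print(os.listdir(sandbox))          # []
shutil.rmtree(sandbox)              # remove the sandbox itself when done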
RenameFile(path, org, new) Renames item x of 'org' to item x of 'new' in 'path'. Returns 0 if all items could be renamed. Returns more than 0 if there were more items in 'org' than 'new'. Returns less than 0 if there were more items in 'new' than 'org'. | def RenameFile(path, org, new):
cont = zip(org, new)
if not path.endswith(("/", "\\")):
path = path + "\\"
for file in cont:
if os.path.isfile(path + file[0]):
os.rename(path + file[0], path + file[1])
return len(org) - len(new) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rename(self,oldItem,newItem):\r\n raise AbstractError\r\n return False",
"def rename(old, new):",
"def rename(old, new):",
"def projectFileRenamed(self, oldfn, newfn):\n editor = self.getOpenEditor(oldfn)\n if editor:\n editor.fileRenamed(newfn)",
"def rename(oldname, newname):",
"def rename(path_file_folder, new_name):\n old_name = path_file_folder[path_file_folder.rfind('/') + 1:] if '/' in path_file_folder else path_file_folder\n if old_name == new_name:\n raise DegooError(f\"rename: Old name and new name \\\"{new_name}\\\" cannot be the same\")\n\n if isinstance(path_file_folder, int):\n file_id = path_file_folder\n elif isinstance(path_file_folder, str):\n file_id = path_id(path_file_folder)\n else:\n raise DegooError(f\"rm: Illegal file: {path_file_folder}\")\n\n return api.rename_file(file_id, new_name)",
"def RenameFile(self, oldname: str, newname: str) -> None:\n ...",
"def rename_file(source, oldname, newname):\n #source = client_variables.output_folder\n renamefiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for renamefile in renamefiles:\n if renamefile.endswith(ext):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)\n elif renamefile.startswith(oldname):\n renamefile = source + \"/\" + renamefile\n print \"renaming:\", renamefile\n newname = source + \"/\" + newname\n print \"newname:\", newname\n os.rename(renamefile, newname)",
"def rename_files(files: list, new_file_name: str) -> bool:\n if len(files) == 0:\n print(\"list of files was empty. Could not rename files.\")\n return False\n\n path = None\n for index, item in enumerate(files, start=1):\n path = Path(rf\"{item}\")\n\n if path.exists():\n # Path class takes care of path slashes depending on system\n new_path = Path(str(path.parent) + \"/\" + new_file_name +\n str(index) + path.suffix)\n path.replace(new_path)\n\n else:\n print(\"Path did not exist. Check file path for errors.\")\n return False\n return True",
"def rename(old, new):\n\ttry:\n\t\tos.rename(old, new)\n\texcept OSError as e:\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise\n\t\tos.remove(old)",
"def fileRename(current_file,num,digits):\n # Key, value pairs of what to replace.\n dictobj = {\n '<num>': get_numbering_format(digits, num),\n '<datetaken>': date_to_string(get_date_taken(current_file),'%Y%m%d__%H_%M'),\n '<dname>': dirname\n }\n # Rename\n new_filename = multi_replace(filename_pattern, dictobj)\n shutil.move(current_file, new_filename)",
"def change_nm(src,dst):\n\timport os\n\ttry:\n\t\tos.rename(src,dst)\n\texcept:\n\t\tprint \"this is a mistake\"\n\t\treturn -1\n\n\treturn 0",
"def rename_file(old_path, new_path):\n if os.path.exists(new_path):\n raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),\n old_path, new_path)\n os.rename(old_path, new_path)",
"def fileRenameandReplace(filename,newfilename):\n try:\n os.rename(filename,newfilename)\n logging.info(\"Json file renamed in PD path\")\n except Exception as er:\n print (\"Not able to rename the json file \")\n return False",
"def rename_project_file(self, old_project=None, new_project=None):\n old_is_project = type(old_project) is Project\n new_is_project = type(new_project) is Project\n\n # cancel if arguments are not projects\n if not old_is_project or not new_is_project:\n return False\n\n # generate filenames\n path = self.data_path + self.project_dir\n filename = path + '/' + self.us(old_project.project_id()) + '.flproject'\n filename_bu = path + '/' + self.us(old_project.project_id()) + '.flproject_bu'\n filename_new = path + '/' + self.us(new_project.project_id()) + '.flproject'\n filename_new_bu = path + '/' + self.us(new_project.project_id()) + '.flproject_bu'\n\n # check if the files exist and rename them\n if os.path.isfile(filename):\n os.rename(filename, filename_new)\n\n if os.path.isfile(filename_bu):\n os.rename(filename_bu, filename_new_bu)\n\n return True",
"def rename(project, project_dir, files_dir, recursive, offset):\n project = NamingProject(project, project_dir=project_dir)\n renamer = FileRename(project, files_dir, recursive=recursive, offset=offset)\n renamer.run()",
"def rename_file(self, path, new_name):\n try:\n self.rename_narrative(self._parse_path(path), self.get_userid(), new_name)\n except WorkspaceError as err:\n raise HTTPError(err.http_code, err.message)\n except Exception as err:\n raise HTTPError(\n 500, \"An error occurred while renaming your Narrative: {}\".format(err)\n )",
"def rename(path, new_path):\n fs.rename(path, new_path)",
"def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False",
"def base_rename(self, new_name):\n\n new_path = join(dirname(self.fspath), new_name)\n\n return self.rename(new_path)",
"def rename(file, format_spec, dir=DIR()):\n\tfile = pathlib.Path(file)\n\t\n\tprint(\"Parsing {name}...\".format(name=file.name))\n\t\n\tarticle = Article(file.read_bytes())\n\t\n\tnew_file = format_spec.format(\n\t\tarticle = article,\n\t\ttitle = dir.getTitle(file) or article.getTitle(),\n\t\tauthor = article.getAuthor() or dir.getAuthor(file),\n\t\tboard = article.getBoard(),\n\t\ttime = article.getTime() or dir.getTime(file) or format_dummy\n\t)\n\tnew_file = safe_file_name(new_file)\n\tnew_file = file.with_name(new_file)\n\t\n\tif file == new_file:\n\t\tprint(\"Same file name!\\n\")\n\t\treturn\n\t\n\tif new_file.exists():\n\t\tnum = 2\n\t\t\n\t\twhile True:\n\t\t\ttemp_file = \"{name} ({num}){ext}\".format(\n\t\t\t\tnum = num,\n\t\t\t\tname = new_file.stem,\n\t\t\t\text = new_file.suffix\n\t\t\t)\n\t\t\ttemp_file = new_file.with_name(temp_file)\n\t\t\t\n\t\t\tif file == temp_file:\n\t\t\t\tprint(\"Same file name!\\n\")\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tif not temp_file.exists():\n\t\t\t\tnew_file = temp_file\n\t\t\t\tbreak\n\t\t\t\t\n\t\t\tnum += 1\n\t\n\tprint(\"Rename to {name}...\\n\".format(name=new_file.name))\n\t\n\tfile.rename(new_file)",
"def rename(path, new_name):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n new_name = encode(new_name, True)\r\n try:\r\n samba.rename(os.path.basename(path), new_name, os.path.dirname(path))\r\n except:\r\n import traceback\r\n logger.info(\r\n \"deportesalacarta.core.filetools mkdir: Error al renombrar el archivo o carpeta\" + traceback.format_exc())\r\n platformtools.dialog_notification(\"Error al renombrar\", path)\r\n return False\r\n else:\r\n new_name = encode(new_name, False)\r\n try:\r\n os.rename(path, os.path.join(os.path.dirname(path), new_name))\r\n except OSError:\r\n import traceback\r\n logger.info(\r\n \"deportesalacarta.core.filetools mkdir: Error al renombrar el archivo o carpeta\" + traceback.format_exc())\r\n platformtools.dialog_notification(\"Error al renombrar\", path)\r\n return False\r\n\r\n return True",
"def rename_name_gene(listOfFile, PATH_FASTA_RENAME) :\n\n\tprint \"\\n#################\"\n\tprint \"# Rename protein\"\n\tprint \"#################\\n\"\n\n\tcreate_folder(PATH_FASTA_RENAME)\n\n\tnew_listOfFile=[]\n\n\tfor my_file in listOfFile :\n\t\tif os.stat(my_file).st_size != 0 :\n\t\t\tnew_listOfFile.append(my_file)\n\n\tseq_to_rename = find_rename_fasta(new_listOfFile)\n\tdict_count = dict([(sequence[1:].rstrip(\" \"), 0) for sequence in seq_to_rename])\n\tprogression=1\n\tnumber_of_file = len(new_listOfFile)\n\n\tfor my_file in new_listOfFile :\n\n\t\tfile_name = os.path.basename(my_file)\n\n\t\tsys.stdout.write(\"{:.2f}% : {}/{} files renamed\\r\".format(progression/float(number_of_file)*100, progression,number_of_file))\n\t\tsys.stdout.flush()\n\t\tprogression += 1\n\n\t\thandle = open(os.path.join(PATH_FASTA_RENAME, file_name), 'w')\n\t\tfasta_reading = SeqIO.parse(my_file, \"fasta\")\n\n\t\tfor seq in fasta_reading :\n\t\t\tif seq.id in dict_count :\n\t\t\t\tif dict_count[seq.id] == 0 :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\telse :\n\t\t\t\t\tdict_count[seq.id] += 1\n\t\t\t\t\tif \"NC_\" in seq.id :\n\t\t\t\t\t\t# NOTE New name : NC_XXXXXX[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_D_nomProteine\n\t\t\t\t\t\tseq.id = \"_\".join(seq.id.split(\"_\")[:2])+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[2:])\n\n\t\t\t\t\telse :\n\t\t\t\t\t\t# NOTE New name : NNNN[_numero de systeme si deux systemes trouvés][_Num(et le nombre de fois nom trouvé)]_nomSysteme_V_nomProteine\n\t\t\t\t\t\tseq.id = seq.id.split(\"_\")[0]+\"_Num\"+str(dict_count[seq.id])+\"_\"+\"_\".join(seq.id.split(\"_\")[1:])\n\t\t\t\t\tseq.name = seq.id\n\t\t\t\t\tseq.description = \"\"\n\n\t\t\tSeqIO.write(seq, handle, \"fasta\")\n\n\t\thandle.close()\n\n\tprint\n\tprint \"Done!\"\n\treturn",
"def testRename(self):\n def _check(results):\n self.assertEqual(results[0], b'')\n self.assertEqual(results[1], b'testfile2')\n return self.runCommand('rename testfile2 testfile1')\n\n d = self.runScript('rename testfile1 testfile2', 'ls testfile?')\n d.addCallback(_check)\n d.addCallback(self.assertEqual, b'')\n return d",
"def fs_rename_entry(self, oldPath, newPath):\n\t\treturn Job(SDK.PrlSrv_FsRenameEntry(self.handle, oldPath, newPath)[0])",
"def rename_file(file_path, equipt_nr):\n work_tuples = parse_columns()\n # Regex used to get differents parts of the file path\n path_regex = re.compile(r'(?P<path>[\\w\\\\:]*)\\\\(?P<filename>[\\w]*).(?P<extension>[\\w].)')\n # Match object containing the different parts of the file path\n match = path_regex.search(file_path)\n\n # Getting the right file to rename\n associated_nr = 0\n for ii in work_tuples:\n if match.group('filename') == ii[0]:\n associated_nr = ii[equipt_nr+1]\n\n # Renaming the file\n os.rename(file_path, match.group('path')+'\\\\'+associated_nr+'.'+match.group('extension'))",
"def test_6a_change_file_name(self):\n if (not GST.logged_in) or (not GST.data_testing_swift_mounted):\n raise unittest.SkipTest(\"Skipped for failed login or failed mounting container.\")\n elif not GST.rename_file_test_ready:\n raise unittest.SkipTest(\"Skipped for failed to prepare renaming test.\")\n self.dismiss_dialogs()\n function = js_func[\"rename\"] % (GST.gs_file_paths[\"file_to_rename_path\"], GST.gs_file_paths[\"after_rename_path\"])\n try:\n self.send_request(function, \"rename()\")\n except Exception as e:\n raise RenameException(\"Failed to rename the file: \" + e.__str__())\n try:\n response = self.get_response()\n assert \"Success\" in response\n self.refresh_page()\n except AssertionError:\n raise RenameException(\"Failed to rename the file: \" + response)",
"def re_name(name,new_name):\n\n try:\n os.rename(config_tools.full_dest+name,config_tools.full_dest+new_name)\n except OSError:\n print(f\"Не удалось переименовать {name}\")\n else:\n print(f\"{name} успешно переименновавано в {new_name}\")",
"def test_component_rename_error_bad_new_name(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component rename component1 component2')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def batch_rename(work_dir, old_ext, new_ext):\n\n for filename in os.listdir(work_dir):\n\n split_file = os.path.splitext(filename)\n file_ext = split_file[1]\n\n if old_ext == file_ext:\n newfile = split_file[0] + new_ext\n os.rename(\n os.path.join(work_dir, filename),\n os.path.join(work_dir, newfile)\n )\n\n print(\"Rename is over\")\n print(os.listdir(work_dir))"
] | [
"0.6143776",
"0.60080373",
"0.60080373",
"0.58179",
"0.57596266",
"0.5529786",
"0.5525997",
"0.55170435",
"0.5496105",
"0.5486858",
"0.546561",
"0.5431416",
"0.54079854",
"0.5389883",
"0.53634936",
"0.53631765",
"0.52538437",
"0.52488685",
"0.52396923",
"0.52350086",
"0.52344745",
"0.5211414",
"0.51859635",
"0.5164709",
"0.5106255",
"0.5085845",
"0.5081746",
"0.5080378",
"0.50744605",
"0.5060539"
] | 0.80195767 | 0 |
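Usage sketch for the RenameFile row above (illustrative only, not part of the dataset record), assuming the RenameFile definition is in scope and Windows-style paths; the file names are invented. It shows how the return value encodes the length difference between the two name lists.

import os
import tempfile

folder = tempfile.mkdtemp()
for name in ("draft1.txt", "draft2.txt", "draft3.txt"):
    open(os.path.join(folder, name), "w").close()

old = ["draft1.txt", "draft2.txt", "draft3.txt"]
new = ["chapter1.txt", "chapter2.txt"]   # deliberately one name short
print(RenameFile(folder, old, new))      # 1 == len(old) - len(new); draft3.txt is left untouched
print(sorted(os.listdir(folder)))        # ['chapter1.txt', 'chapter2.txt', 'draft3.txt']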
AttribFile(file, attr="-R -S -H -I", *params) Sets Windows file and folder attributes. The default attribute string removes all unwanted attributes. Extra parameters are optional; they are mainly there to touch folders as well. Returns 0 if it completed successfully. | def AttribFile(file, attr="-R -S -H -I", *params):
params = " ".join(params).split() # handle tuples and multispaced items
if isinstance(attr, (tuple, list, set)):
attr = " ".join(attr)
lines = attr.split() + [file] + params
attrib = subprocess.Popen(["C:\\Windows\\System32\\attrib.exe"] + lines)
attrib.communicate()
return attrib.returncode | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetAttributes(self, attr):\r\n \r\n if self._ownsAttr:\r\n del self._attr\r\n \r\n self._attr = attr\r\n self._ownsAttr = False",
"def set_file_attr(self):\n if self.resolution == 1000:\n satellite_type = ['AQUA', 'TERRA']\n if self.satellite in satellite_type:\n try:\n h4r = SD(self.in_file, SDC.READ)\n self.file_attr = attrs2dict(h4r.attributes())\n except Exception as e:\n print(str(e))\n else:\n raise ValueError(\n 'Cant read this satellite`s data.: {}'.format(self.satellite))\n else:\n raise ValueError(\n \"Cant handle this resolution: \".format(self.resolution))",
"def testGetStatAttribute(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n stat_attribute = file_entry._GetStatAttribute()\n\n self.assertIsNotNone(stat_attribute)\n self.assertIsNone(stat_attribute.device_number)\n self.assertEqual(stat_attribute.group_identifier, 20)\n self.assertEqual(stat_attribute.inode_number, 21)\n self.assertEqual(stat_attribute.mode, 0o100644)\n self.assertEqual(stat_attribute.number_of_links, 1)\n self.assertEqual(stat_attribute.owner_identifier, 501)\n self.assertEqual(stat_attribute.size, 22)\n self.assertEqual(stat_attribute.type, stat_attribute.TYPE_FILE)",
"def setStatiFile(self, filename):\n self.statiFile = filename",
"def add_attrib(self, key, func, func_args):\n if key in self.aux_attrib:\n raise KeyError(\"Attribute '{0}' already exists, please use 'set_attrib'.\".format(key))\n else:\n self.set_attrib(key, func, func_args)",
"def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))",
"def set_attrib(self, key, func, func_args):\n self.aux_attrib[key] = func\n self.aux_attrib_args[key] = func_args",
"def win_remove_user_file_path_permissions(file_path, username):\n # type: (str, str) -> None\n if not sys.platform.startswith(\"win\"):\n return None\n\n # 1. First we need to disable inheritance for this file and the directory\n args = [\"icacls.exe\", file_path, \"/inheritance:d\", \"/T\"]\n run_command_popen(args=args, shell=False, log_errors=True, logger_func=print)\n\n # 2. Then we remove the permissions for the user so only admin has permission to read\n args = [\"icacls.exe\", file_path, \"/remove\", username, \"/T\"]\n run_command_popen(args=args, shell=False, log_errors=True, logger_func=print)",
"def testGetAttributes(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_FILE,\n location='/a_directory/a_file', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertIsNone(file_entry._attributes)\n\n file_entry._GetAttributes()\n self.assertIsNotNone(file_entry._attributes)\n self.assertEqual(len(file_entry._attributes), 1)\n\n test_attribute = file_entry._attributes[0]\n self.assertIsInstance(test_attribute, hfs_attribute.HFSExtendedAttribute)\n self.assertEqual(test_attribute.name, 'myxattr')\n\n test_attribute_value_data = test_attribute.read()\n self.assertEqual(test_attribute_value_data, b'My extended attribute')",
"def set_attr(self, aid, value, custom=False):\n if aid not in self.attributes and not custom:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, value)\n self.remember_custom_attribute(self.name, aid, value)\n self.attributes[aid] = {}\n else:\n # TODO: validate data_type\n pass\n self.attributes[aid]['nv'] = value\n # self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)",
"def process_single(fileinfos, args):\n # does the file name exist?\n if is_filename(args.name):\n check_filename(args.name, fileinfos)\n # does the attribute name exist?\n elif is_attname(args.name):\n check_attname(args.name, fileinfos)\n # does the element name exist? \n elif args.name != \"\":\n check_elname(args.name, fileinfos)\n \n count_and_draw(fileinfos, args, args.name)",
"def set_attr(zone, attr, line):\n zone.set_attr(attr, line[attr])",
"def modify_image_attribute(self, image_id, attribute='launchPermission',\r\n operation='add', user_ids=None, groups=None,\r\n product_codes=None):\r\n params = {'ImageId' : image_id,\r\n 'Attribute' : attribute,\r\n 'OperationType' : operation}\r\n if user_ids:\r\n self.build_list_params(params, user_ids, 'UserId')\r\n if groups:\r\n self.build_list_params(params, groups, 'UserGroup')\r\n if product_codes:\r\n self.build_list_params(params, product_codes, 'ProductCode')\r\n return self.get_status('ModifyImageAttribute', params, verb='POST')",
"def remove_attributes(cube, field, filename):\n cube.attributes = None",
"def set_touched(self, file: str, cmd_call: str):\n if file is None or file.upper() == 'ALL' or file in util.OPTIONS_CUI_TYPES:\n for _f, _d in self.inputs['files'].items():\n _d[cmd_call] = {file: True} if file in util.OPTIONS_CUI_TYPES else True\n else:\n print(self.inputs['files'])\n self.inputs['files'][file][cmd_call] = True",
"def AssignAttributes(self, attr):\r\n \r\n self.SetAttributes(attr)\r\n self._ownsAttr = True",
"def StripAttributes(path):\n\n if not path.endswith((\"/\", \"\\\\\")):\n path += \"\\\\\"\n folders = [path]\n allf = []\n while folders:\n folder = folders.pop(0)\n allf.append(folder)\n for lister in os.listdir(folder):\n if os.path.isdir(folder + lister):\n folders.append(folder + lister + \"\\\\\")\n else:\n AttribFile(folder + lister)\n\n return tuple(allf)",
"def attrControlGrp(*args, annotation: Union[AnyStr, bool]=\"\", attribute: Union[name, bool]=None,\n changeCommand: Union[Script, bool]=None, enable: bool=True, exists:\n bool=True, handlesAttribute: Union[name, bool]=None, hideMapButton:\n bool=True, label: Union[AnyStr, bool]=\"\", preventOverride: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def _write_ce_collector_attributes_file(self, attributes_file):\n attributes_file_contents = (\n \"# Do not edit - file generated by osg-configure\\n\"\n + self.ce_attributes_str + \"\\n\"\n )\n return utilities.atomic_write(attributes_file, attributes_file_contents)",
"def set(module: str, attribute: str, value: object) -> int:\n _manageWebReplFile(module, attribute, value) # Can not be at _manageAttribute's mode == \"w\" branch: too deep.\n return _manageAttribute(module, attribute, \"w\", value)",
"def set_attr(self):\n\n # Create a new array\n self.fileh.create_array('/', 'array', self.a1)\n for i in range(self.nobjects):\n # Set an attribute\n setattr(self.fileh.root.array.attrs, \"attr\" + str(i), str(self.a1))\n # Put a mark\n self.fileh.mark()\n # Unwind all marks sequentially\n for i in range(self.niter):\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.undo()\n if verbose:\n print(\"u\", end=' ')\n if verbose:\n print()\n undo = clock() - t1\n # Rewind all marks sequentially\n t1 = clock()\n for i in range(self.nobjects):\n self.fileh.redo()\n if verbose:\n print(\"r\", end=' ')\n if verbose:\n print()\n redo = clock() - t1\n\n print(\"Time for Undo, Redo (set_attr):\", undo, \"s, \", redo, \"s\")",
"def touch(file_path: str) -> None:\n try:\n os.utime(file_path, None)\n except Exception:\n open(file_path, 'a').close()",
"def __init__(self, fileName):\n reader = pcssTools.PcssFileReader(fileName)\n lines = reader.getLines()\n self._attributes = {}\n inputCounter = 0\n outputCounter = 0\n for line in lines:\n\n [name, attributeType, optional, niceName, featureClass, io] = line.split('\\t')\n att = PcssFileAttribute(name, attributeType, optional, niceName, featureClass, io)\n if (att.isInputAttribute()):\n att.setInputOrder(inputCounter)\n inputCounter += 1\n if (att.isOutputAttribute()):\n att.setOutputOrder(outputCounter)\n outputCounter += 1\n self.setFileAttribute(att)",
"def readAttributesFile(self, filepath):\n raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=\",\", filling_values=0, dtype=None)\n data = [list(item)[1:] for item in raw_data]\n\n self.attributeMatrix = np.asmatrix(data)\n n = self.attributeMatrix.shape[1]\n self.location = self.attributeMatrix[:, 0:2]\n self.location = self.location.astype('float')\n self.pop = self.attributeMatrix[:, 2:n].astype('int')\n # self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0\n self.n_group = n-2\n self.n_location = self.attributeMatrix.shape[0]\n self.pop_sum = np.sum(self.pop, axis=1)\n self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)\n self.tract_id = self.tract_id.reshape((self.n_location, 1))\n\n return self.attributeMatrix",
"def GetAttrib( self ):\n attrib = GameNodePath.GetAttrib( self )\n \n if self.GetModified():\n attrib['path'] = self.GetPath()\n \n return attrib",
"def _setAttributes(self, primaryAttr, attrs):\n return False",
"def touch_file(file_name):\n os.utime(file_name, None)",
"def touch(path, atime=None, mtime=None):\n assert ((atime is None) == (mtime is None)), 'atime and mtime are exclusive'\n if atime is None:\n times = None\n else:\n times = (atime, mtime)\n with open(path, 'ab+'):\n # Note: there is a race condition here.\n os.utime(path, times=times)",
"def update_file(filename, items):\n # TODO: Implement something in the templates to denote whether the value\n # being replaced is an XML attribute or a value. Perhaps move to dyanmic\n # XML tree building rather than string replacement.\n should_escape = filename.endswith('addon.xml')\n\n with open(filename, 'r') as inp:\n text = inp.read()\n\n for key, val in items.items():\n if should_escape:\n val = saxutils.quoteattr(val)\n text = text.replace('{%s}' % key, val)\n output = text\n\n with open(filename, 'w') as out:\n out.write(output)",
"def del_attrib(self, key):\n self.aux_attrib.pop(key)\n self.aux_attrib_args.pop(key)"
] | [
"0.5261392",
"0.49339035",
"0.4924765",
"0.4917284",
"0.49015188",
"0.48957682",
"0.48640934",
"0.48483077",
"0.46952525",
"0.46852258",
"0.4674856",
"0.4672482",
"0.4654802",
"0.46223906",
"0.46107024",
"0.46076995",
"0.45837176",
"0.45700586",
"0.45637625",
"0.45579827",
"0.45542854",
"0.4545037",
"0.45377535",
"0.45256865",
"0.45168254",
"0.45162034",
"0.45058545",
"0.44843227",
"0.44724268",
"0.44671586"
] | 0.7758203 | 0 |
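Usage sketch for the AttribFile row above (illustrative only, not part of the dataset record). AttribFile shells out to attrib.exe, so it is Windows-only; the paths below are invented, while /S and /D are the standard attrib.exe switches for recursing into subfolders and processing folders themselves.

# Clear the read-only and hidden flags on a single file:
AttribFile(r"C:\Temp\example\readme.txt", "-R -H")

# Attributes may also be given as a tuple/list; extra positional arguments are
# passed straight through to attrib.exe:
AttribFile(r"C:\Temp\example\*", ("-R", "-S", "-H"), "/S", "/D")

# Both calls return attrib.exe's exit code (0 on success).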
StripAttributes(path) Removes all unwanted attributes from files in 'path'. | def StripAttributes(path):
if not path.endswith(("/", "\\")):
path += "\\"
folders = [path]
allf = []
while folders:
folder = folders.pop(0)
allf.append(folder)
for lister in os.listdir(folder):
if os.path.isdir(folder + lister):
folders.append(folder + lister + "\\")
else:
AttribFile(folder + lister)
return tuple(allf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_attributes(cube, field, filename):\n cube.attributes = None",
"def strip_attributes(arff_file):\r\n start = arff_file.find('% filename')\r\n new_arff = arff_file[start:]\r\n return new_arff",
"def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)",
"def remove_attributes(self, remove_attrs):\n remove = []\n for attr in self.data:\n for prefix in remove_attrs:\n if attr.startswith(prefix):\n remove.append(attr)\n break\n\n self.data = self.data.drop(remove, axis=1)",
"def remove_attribute(self, name):\n\n pass",
"def clean_path(file_path):\n\n pass",
"def remove_attribute(self, attribute):\n if attribute in self.attributes:\n self.attributes.remove(attribute)\n self.attribute_list.remove(attribute)\n return self",
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair",
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair",
"def remove_attribute(self, attribute_key):\n self.attributes.__delitem__(attribute_key) # delete the input key-value pair",
"def test_remove_a_single_attribute(self):\n pass",
"def removeAttr(self, *args):\n return _libsbml.XMLToken_removeAttr(self, *args)",
"def remove_attribute(self, attribute: str) -> None:\n attr_index = self.__attr_index(attribute)\n if attr_index is not None:\n self.yaml_node.value.pop(attr_index)",
"def remove_attribute(self, attribute) -> None:\n logging.info(f\"remove element attribute. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.removeAttribute(\"{attribute}\");\"\"\"\n self._execute_javascript(js)",
"def remove_attr(self, event: Union[wx.CommandEvent, None],\n attr_id: Union[int, None]) -> None:\n self.attr_buttons.pop(attr_id).Destroy()\n self.attr_values.pop(attr_id).Destroy()\n self.attr_labels.pop(attr_id).Destroy()\n attr_label = self.attr_ids.pop(attr_id)\n if attr_label != '':\n self.element.attr.pop(attr_label)\n if event is not None:\n self._update_attr_list()",
"def removeattribute(self, uid, field):\n\n raise NotImplementedError",
"def clear_attrs(self):\n self._attributes.clear()",
"def remove(path):",
"def attr_remove(self):\n def _del_if_in(obj, attr):\n if attr in obj:\n del obj[attr]\n if self._modifier_exists(REMOVE_KEY):\n to_remove = self[CONFIG_KEY][SAMPLE_MODS_KEY][REMOVE_KEY]\n _LOGGER.debug(\"Removing attributes: {}\".format(to_remove))\n for attr in to_remove:\n [_del_if_in(s, attr) for s in self.samples]",
"def remove(self, attr: str):\n self._includes.remove(attr)\n self._regex = None",
"def __rm_file_attributes(file_contents: str) -> str:\n\n result = re.sub(COBOL_FORMAT_RM_RECORDING_MODE_REGEX, ' ', file_contents)\n result = re.sub(COBOL_FORMAT_RM_BLOCK_CONTAINS_REGEX, ' ', result)\n result = re.sub(COBOL_FORMAT_RM_RECORD_CONTAINS_REGEX, ' ', result)\n result = re.sub(COBOL_FORMAT_RM_LABEL_REGEX, ' ', result)\n\n return result",
"def del_attrib(self, key):\n self.aux_attrib.pop(key)\n self.aux_attrib_args.pop(key)",
"def remove_attr(self, event: Union[wx.CommandEvent, None],\n attr_id: Union[int, None]) -> None:\n self.attr_req_buttons.pop(attr_id).Destroy()\n self.attr_req_elements.pop(attr_id).Destroy()\n self.attr_req_labels.pop(attr_id).Destroy()\n attr_label = self.attr_req_ids.pop(attr_id)\n if attr_label != '':\n self.attr_requirements[self.element].pop(attr_label)\n if event is not None:\n self._update_attr_list()",
"def clean_path(path):\n return resolved_path(path)",
"def strip_path(self):\n return self.path.replace('/', '')",
"def reset_attr(self, server, attribute):\n\t\tattribute = str(attribute)\n\t\tcfg = self.get_cfg(server)\n\t\tif cfg:\n\t\t\treturn cfg.pop(attribute, None)",
"def stripBlacklistAttrs(attrs, blacklist):\n gb = FnAttribute.GroupBuilder()\n gb.update(attrs)\n\n for attrName in blacklist:\n gb.delete(attrName)\n\n return gb.build()",
"def delete_attributes(self, attribute_list):\n with LayerEditingManager(self.layer, 'Remove attributes', DEBUG):\n # remove attributes\n layer_pr = self.layer.dataProvider()\n print \"REMOVING %s\" % attribute_list\n #TODO fix this\n print \"TODO fix ProcessLayer.delete_attributes()\"\n print \"this attributes should be deleted: %s\" % attribute_list\n #return layer_pr.deleteAttributes(attribute_list)",
"def _trim_path(path):\n if path.endswith(\"/\"):\n path = path[:-1] # remove / at the end\n \n return path",
"def removeAttributeByIndex(self, index):\n\n if self._checkAttributeIndex(index) is not True:\n return False\n\n del self._attributes[index]\n\n return True"
] | [
"0.63845766",
"0.6220756",
"0.6163511",
"0.6163511",
"0.6080287",
"0.5801426",
"0.5746101",
"0.57427835",
"0.57427835",
"0.57427835",
"0.5729805",
"0.5704866",
"0.5638217",
"0.5624947",
"0.5587875",
"0.5585241",
"0.5573176",
"0.55374366",
"0.5532499",
"0.5501856",
"0.5465265",
"0.54516745",
"0.54327506",
"0.53620607",
"0.5351809",
"0.5340758",
"0.5309963",
"0.53016454",
"0.53013635",
"0.52653694"
] | 0.6896639 | 0 |
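Usage sketch for the StripAttributes row above (illustrative only, not part of the dataset record), assuming both StripAttributes and AttribFile from these rows are in scope; like AttribFile it only works on Windows, and the path is invented.

# StripAttributes walks every folder under the given root, runs AttribFile on each
# file, and returns the tuple of folders it visited (the root comes first).
visited = StripAttributes(r"C:\Temp\example")
for folder in visited:
    print(folder)   # e.g. C:\Temp\example\ then each subfolder, each with a trailing backslash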
StripFolder(path) Brings all files within all subfolders to the root ('path'). Deletes all subfolders of the main path. Returns a tuple of all the subfolders that were copied over. | def StripFolder(path):
if not path.endswith(("/", "\\")):
path = path + "\\"
folders = [path]
allf = []
while folders:
folder = folders.pop(0)
allf.append(folder)
for lister in os.listdir(folder):
if os.path.isdir(folder + lister):
folders.append(folder + lister + "\\")
elif not path == folder:
CopyFolder(folder, path)
shutil.rmtree(folder)
return tuple(allf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ExtractFolder(path):\n\n if not path.endswith((\"/\", \"\\\\\")):\n path = path + \"\\\\\"\n folders = []\n files = []\n for file in os.listdir(path):\n files.append(path + file)\n _file, ext = GetName(file)\n folder = ExtractFile(path + file)\n CopyFolder(folder, path + _file)\n folders.append(path + _file)\n\n DeleteFile(*files)\n return tuple(folders)",
"def clean_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)",
"def _unstage_folder(dir_path):\n for dir_item in os.listdir(dir_path):\n full_path = os.path.join(dir_path, dir_item)\n if os.path.isfile(full_path) and dir_item != 'load.go':\n os.remove(full_path)",
"def delete_empty_folders(folder_path):\n if not os.path.exists(folder_path) or not os.path.isdir(folder_path):\n # Bail early (silently)\n return\n\n # Delete empty subfolders first\n for item in os.scandir(folder_path):\n if item.is_dir():\n delete_empty_folders(item.path)\n\n # Remove top folder\n try:\n os.rmdir(folder_path)\n except OSError:\n pass",
"def delete_folder(path: str) -> None:\n\tuux.show_info(\"Deleting \" + path)\n\n\tif not os.path.exists(path):\n\t\t# Path does not exist\n\t\treturn\n\n\ttry:\n\t\tshutil.rmtree(path, True)\n\texcept OSError as ex:\n\t\tuux.show_warning(\"Failed to delete directory, \" + os.strerror(ex.errno))",
"def remove_folder_recursively(folder_path):\n\n print()\n message : str = \"removing folder\"\n pretty_print_value(folder_path,message,Fore.YELLOW)\n print()\n\n command = \"rm -rf \\\"\" + folder_path + \"\\\"\"\n print(command)\n os.system(command)\n ...",
"def remove_dir_content(path):\n for item in os.listdir(path):\n p = os.path.join(path, item)\n if os.path.isdir(p):\n shutil.rmtree(p)\n else:\n os.unlink(p)",
"def removedirs (folder, silent = False) :\n file, rep = [], []\n for r, d, f in os.walk (folder) :\n for a in d : \n rep.append (os.path.join (r, a))\n for a in f : \n file.append (os.path.join (r, a))\n impos = []\n file.sort ()\n rep.sort (reverse = True)\n for f in file :\n try :\n if os.path.exists (f):\n os.remove (f)\n except Exception as e :\n fLOG (\"unable to remove file\", f, \" --- \", str(e).replace(\"\\n\", \" \"))\n if silent : impos.append (f)\n else : raise \n for f in rep :\n try :\n if os.path.exists (f):\n os.removedirs (f)\n except Exception as e :\n fLOG (\"unable to remove folder\", f, \" --- \", str(e).replace(\"\\n\", \" \"))\n if silent : impos.append (f)\n else : raise \n \n if os.path.exists (folder) :\n try :\n os.rmdir(folder)\n except Exception as e:\n impos.append(folder)\n return impos",
"def delete_folder(folder_path):\r\n if os.path.exists(folder_path):\r\n shutil.rmtree(folder_path)",
"def delete_folder(folder_path):\n shutil.rmtree(folder_path)",
"def _safe_clear_dirflow(path):\n print(\"Clearing {}...\".format(path))\n assert os.path.isdir(path), \"Didn't pass a folder to be cleaned\"\n list_dir = [f for f in os.listdir(path) if not f.startswith('.')]\n for folder in list_dir:\n cat_folder = os.path.join(path, folder)\n assert os.path.isdir(cat_folder), \\\n \"Dir contains Non-Folder File!\"\n cat_folder_item = [f for f in os.listdir(cat_folder)\n if not f.startswith('.')]\n for file in cat_folder_item:\n # For every file, confirm is PNG or error.\n # DONT DELETE YET, IN CASE OF ERRORS!\n assert \".png\" in file, \"Folder has Non PNG Contents!\"\n # If we got though that with no error, then now we can delete!\n # for folder in os.listdir(the_path):\n # cat_folder = os.path.join(the_path, folder)\n # for file in os.listdir(cat_folder):\n # os.remove(os.path.join(cat_folder, file))\n # os.rmdir(cat_folder)\n # os.rmdir(the_path)\n return True",
"def clean_all_folder():\n LOGGER.warning('removal of old files has been temporarily disabled')\n # paths_to_clean = CFG.remove_files\n # if paths_to_clean: # pylint: disable=using-constant-test\n # for remove_config in paths_to_clean: # pylint: disable=not-an-iterable\n # name = tuple(remove_config.keys())[0]\n # LOGGER.info(f'processing: {name}')\n # remove_config = remove_config[name]\n # if 'folder' not in remove_config.keys():\n # LOGGER.error(f'missing \"folder\" in {name}')\n # return\n # if 'age' not in remove_config.keys():\n # LOGGER.error(f'missing \"age\" in {name}')\n # return\n # if not os.path.exists(remove_config['folder']):\n # LOGGER.error(f'path does not exist: {remove_config[\"folder\"]}')\n # return\n # _remove_old_files_from_folder(**remove_config)\n # else:\n # LOGGER.debug('no folder to clean')",
"def delete_tempfolder(path):\n try:\n rmtree(path)\n except:\n pass",
"def stripDirs(path, count):\n # TODO: This is a hack and not robust.\n parts = path.split(os.sep)\n return os.sep.join(parts[count:])",
"def split_path(self, path):\n path = os.path.splitdrive(path)[1][1:]\n folders = []\n while 1:\n path, folder = os.path.split(path)\n if folder != \"\" and folder:\n folders.append(folder)\n if len(path) == 0:\n return folders[::-1]\n else:\n if path != \"\" and path:\n folders.append(path)\n break\n folders.reverse()\n return folders",
"def delete_folder(path):\n command = ['rm', '-rf', TEST_DIR]\n file_operation(path, command)",
"def removeFolder(self):\n selectedItems = self.fileDir.selectedItems()\n if selectedItems:\n replay = QtWidgets.QMessageBox.question(self, 'Question',\n 'Are you sure, you want to remove the folder?',\n QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.Cancel)\n logger.warning('Are you sure you want to remove the folder?')\n\n if replay == QtWidgets.QMessageBox.Yes:\n for eachItem in selectedItems:\n folderPath = str(eachItem.toolTip(0))\n try:\n os.chmod(folderPath, 0777)\n shutil.rmtree(folderPath)\n logger.info('Remove folder Path: \\t%s !' % folderPath)\n except Exception, result:\n logger.warning(result)\n\n # refresh folders structure\n self.removeExistWidget(self.animWidgetLayout)\n self.loadFolderStructure(self.dataDir)\n else:\n QtWidgets.QMessageBox.warning(self, 'Warning',\n 'No folder selected!\\nPlease select the folder!',\n QtWidgets.QMessageBox.Ok)\n logger.warning('No folder selected!\\nPlease select the folder!')",
"def delete_all(path, delete_root_dir=False):\n if not os.path.isdir(path):\n return\n files=os.listdir(path)\n for x in files:\n fullpath=os.path.join(path, x)\n if os.path.isfile(fullpath):\n os.remove(fullpath)\n elif os.path.isdir(fullpath):\n delete_all(fullpath, True)\n if delete_root_dir:\n os.rmdir(path)",
"def remove_indiv_files(path):\n if isinstance(path, FSMap):\n path.fs.delete(path.root, recursive=True)\n else:\n fname, ext = os.path.splitext(path)\n if ext == '.zarr':\n shutil.rmtree(path)\n else:\n os.remove(path)",
"def clean_list(path):\n # Remove directories \n clean_file_list = [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]\n\n # List files to ignore\n bad_files = ['desktop.ini',\n os.path.basename(__file__)]\n # TODO: Ignore hidden files & self when compiled\n\n # Loop through bad files and remove from list\n for found_file in bad_files:\n if found_file in clean_file_list:\n clean_file_list.remove(found_file)\n return clean_file_list",
"def clean_folder(path):\n shutil.rmtree(path + \"/__pycache__\", ignore_errors=True)\n\n folders = glob.glob(path + \"/*/\")\n for folder in folders:\n shutil.rmtree(folder + \"/__pycache__\", ignore_errors=True)\n\n clean_folder(folder)\n\n cython_exts = glob.glob(folder + \"/*.cpp\")\n cython_exts.extend(glob.glob(folder + \"/*.cpython*\"))\n for file in cython_exts:\n os.remove(file)",
"def clean_folder(path):\n shutil.rmtree(path + \"/__pycache__\", ignore_errors=True)\n\n folders = glob.glob(path + \"/*/\")\n for folder in folders:\n shutil.rmtree(folder + \"/__pycache__\", ignore_errors=True)\n\n clean_folder(folder)\n\n cython_exts = glob.glob(folder + \"/*.cpp\")\n cython_exts.extend(glob.glob(folder + \"/*.cpython*\"))\n for file in cython_exts:\n os.remove(file)",
"def delete_dir(path, include_root=True):\r\n for root, dirs, files in os.walk(path, topdown=False):\r\n for name in files:\r\n os.remove(os.path.join(root, name))\r\n for name in dirs:\r\n os.rmdir(os.path.join(root, name))\r\n if include_root:\r\n os.rmdir(path)",
"def delete_files_from_folder(path: str) -> None:\n for the_file in os.listdir(path):\n file_path = os.path.join(path, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)",
"def get_folder_filenames(path):\n result = {}\n for parent, dirnames, filenames in os.walk(path):\n if not dirnames:\n result[os.path.split(parent)[-1]] = filenames\n return result",
"def delete_all_files_in_folder(folder_path: str) -> None:\n if os.path.isdir(folder_path):\n [FileHandler.delete_file(path=path) for path in glob(os.path.join(folder_path, \"*\"))]\n else:\n raise Exception(\"Not a valid path for a folder\")",
"def list_folders(path):\n return (name for name in os.listdir(path)\n if os.path.isdir(os.path.join(path, name)))",
"def _clean_path(self, pathToRemove, files):\n result = []\n for filePath in files:\n filePath = string.split(filePath, pathToRemove)\n filePath = filePath[1]\n filePath = string.split(filePath, os.sep)\n if filePath[0] == '':\n filePath.remove('')\n fileName = string.join(filePath, os.sep)\n result.append(fileName)\n return result",
"def simplifyPath(self, path):\n pwd = [] # stack, present working directory\n path = path.split(\"/\")\n for curr in path:\n if not curr or curr == \".\": # skip current dir\n continue\n elif curr == \"..\":\n if pwd: # if we're not in the root directory, go back\n pwd.pop()\n else:\n pwd.append(curr)\n return \"/\" + \"/\".join(pwd)",
"def removeFolder(foldername):\n \n #if folder exists\n if os.path.exists(foldername):\n # recursively remove files in folder\n for root, dirs, files in os.walk(foldername, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n \n # delete folder itself\n os.rmdir(foldername) \n # end removeFolder()"
] | [
"0.7029333",
"0.60177916",
"0.59128374",
"0.5911949",
"0.5794489",
"0.57633936",
"0.57363594",
"0.57268214",
"0.5675963",
"0.56686217",
"0.5616837",
"0.55706525",
"0.5567647",
"0.5564295",
"0.5555438",
"0.55062664",
"0.5492872",
"0.5459668",
"0.5448241",
"0.54460824",
"0.54331386",
"0.54331386",
"0.54231375",
"0.5419802",
"0.54036725",
"0.5402826",
"0.53909916",
"0.5334818",
"0.52885365",
"0.5285667"
] | 0.8126114 | 0 |
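The StripFolder record above flattens a directory tree by copying subfolder contents up into the root and then deleting the subfolders; it relies on a project-specific CopyFolder helper that is not shown in this record. A minimal, self-contained sketch of the same flattening idea using only the standard library (all names here are illustrative, not the project's API) might look like:

import os
import shutil

def flatten_tree(root):
    # Move every file found below 'root' directly into 'root', then remove the
    # emptied subdirectories. Bottom-up walk so children are handled before
    # their parents; name collisions are not handled in this sketch.
    removed = []
    for dirpath, dirnames, filenames in os.walk(root, topdown=False):
        if os.path.abspath(dirpath) == os.path.abspath(root):
            continue
        for name in filenames:
            shutil.move(os.path.join(dirpath, name), os.path.join(root, name))
        os.rmdir(dirpath)  # already empty: its own subfolders were removed first
        removed.append(dirpath)
    return tuple(removed)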
CallSkipMod(mod) Prints a missing mod warning using 'mod' as the missing file. Always returns 0. | def CallSkipMod(mod):
if len(var.MOD_LOCATION) == 1:
    iner = "ONE_IN"
else:
    iner = "MULT_IN_ONE"
file = getattr(fl, mod)
if "{0}" in file:
    file = file.format(1) # make sure it does say *something*
log.logger("PARS_SKIP", format=[mod, file, iner, "', '".join(var.MOD_LOCATION)])
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shall_skip(module):\n # skip it, if there is nothing (or just \\n or \\r\\n) in the file\n return path.getsize(module) < 3",
"def skipped (func):\n try:\n from nose.plugins.skip import SkipTest\n\n def skipme (*a, **k):\n raise SkipTest()\n\n skipme.__name__ = func.__name__\n return skipme\n except ImportError:\n # no nose, we'll just skip the test ourselves\n def skipme (*a, **k):\n print \"Skipping\", func.__name__\n\n skipme.__name__ = func.__name__\n return skipme",
"def import_fail_info(mod_name,fns=None):\n\n if fns == None:\n warn(\"Loading of %s failed.\\n\" % (mod_name,))\n else:\n warn(\"Loading of %s from %s failed.\\n\" % (fns,mod_name))",
"def skip_test(reason):\n global test_name_text\n print \"SKIP {}: {}\\n\".format(test_name_text, reason)\n sys.exit(0)",
"def test_invalid_python_no_exec_zero_args(self):\n\n data_file = testutils.DataFile(\"integration_module_invalid_noexecargs\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_noexecargs --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_noexecargs test\")\n\n assert(rtn.return_code == 242)",
"def test_mod_not_reported(self):\n override_acl(self.user, {'can_moderate_private_threads': 1})\n\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 404)",
"def skip_or_run_error_extra_info_test(func):\n\n return skip_or_run_test_tarantool(func, '2.4.1',\n 'does not provide extra error info')",
"def skip_require():\n global ignore_once\n ignore_once = True",
"def addSkip(self, test, err=\"\"):\n\n self.logger.info(\"\\r\\x1b[33;1mSKIP\\x1b[0m \" + test.shortDescription() + \"\\n\")\n if err: self.logger.info(\"\\t\\t%s\\n\" % err)",
"def run_skip(self):\n pass",
"def skip(func):\n return",
"def module_test(mod=None):\r\n func_list = [item[1] for item in mod.__dict__.items() if isinstance(item[0],str) and item[0].startswith('test')]\r\n sys.stderr.write(\"\\n\".join([func.__name__ + \": \" + (\"PASS\" if func() else \"FAIL\") for func in func_list] + [ func.__doc__ for func in func_list if func.__doc__ is not None]))\r\n sys.stderr.write(\"\\n\")",
"def test_import_skip_skips(self):\n try:\n import badmodule\n self.fail(\n \"Expected ImportError for %r, nothing raised.\" % (badmodule,))\n except ImportError as import_error:\n self.assertRaises(SkipTest, import_skip, import_error, 'badmodule')",
"def should_skip_file(name):\n if name.startswith('.'):\n return 'Skipping hidden file %(filename)s'\n if name.endswith('~') or name.endswith('.bak'):\n return 'Skipping backup file %(filename)s'\n if name.endswith('.pyc') or name.endswith('.pyo'):\n return 'Skipping %s file ' % os.path.splitext(name)[1] + '%(filename)s'\n if name.endswith('$py.class'):\n return 'Skipping $py.class file %(filename)s'\n if name in ('CVS', '_darcs'):\n return 'Skipping version control directory %(filename)s'\n return None",
"def test_taskmod_no_taskfile(modpath):\n sys.meta_path.append(TaskImporter())\n task = import_module(modpath)\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == []",
"def skip ( nEvents ) :\n st = SUCCESS \n with DisabledAlgos() :\n st = run ( nEvents )\n \n return st",
"def test_skip_with_decorator_and_reason():\n pass",
"def _validate_mod(self, mod: Modifier):\r\n return not mod.name in self.mods",
"def add_mod(self, mod_name: str) -> None:\n if mod_name + \"_module\" not in self.modules:\n self.modules[mod_name + \"_module\"] = None\n if \"mod_\" + mod_name + \".c\" not in self.modules:\n self.modules[\"mod_\" + mod_name + \".c\"] = None",
"def skip(problem):\n click.echo(\"Current problem is problem %i.\" % problem)\n generate(problem + 1, prompt_default=False)",
"def __skips(self, fichier):\n \n f = fichier \n fexist = self.files[f]['filexist']\n\n if not fexist:\n if f not in self.skips: \n self.warn('[fic] %s, innexistant' % f)\n self.skips.append(f)\n else:\n self.info('[fic] %s, existe' % f)\n self.info('[fic] %s, revision initiale %s, derniere revision %s' % (f, self.files[f]['rcsfirstrev'], self.files[f]['rcslastrev']))\n return None",
"def unavailable_importer(**kwargs):\n return LazyImportTester(\"_qiskit_this_module_does_not_exist_\", **kwargs)",
"def skip_or_run_error_ext_type_test(func):\n\n return skip_or_run_test_tarantool(func, '2.10.0',\n 'does not support error extension type')",
"def test_IMOD_pass(self):\n self.assertTrue(self.mod.isset)",
"def NoModulator(*args, **kwargs):\n return None",
"def test_findtasks_none(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n # monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n # FakeModuleWithTasks)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfile = 'a_0.py'\n\n sys.meta_path.append(TaskImporter(taskfile))\n taskmod = import_module(modpath)\n\n assert hasattr(taskmod, '__tasks__')\n assert taskmod.__tasks__ == []",
"def skip(*, reason):\n return unittest.skip(reason)",
"def test_bad_file() -> None:\n\n bad = random_string()\n rv, out = getstatusoutput(f'{RUN} {bad}')\n assert rv != 0\n assert out.lower().startswith('usage:')\n assert re.search(f\"No such file or directory: '{bad}'\", out)",
"def skip(self):\n\tglobal message\n\tmessage=5",
"def test_import_skip_reraises(self):\n try:\n import badmodule\n self.fail(\n \"Expected ImportError for %r, nothing raised.\" % (badmodule,))\n except ImportError as import_error:\n err = self.assertRaises(\n ImportError, import_skip, import_error, 'nothing')\n self.assertEqual(err, import_error)"
] | [
"0.5755264",
"0.56256",
"0.5601252",
"0.5555863",
"0.5437181",
"0.5423412",
"0.5353894",
"0.5249999",
"0.5243704",
"0.52432823",
"0.5241218",
"0.5223273",
"0.5221188",
"0.5192148",
"0.51250154",
"0.5081649",
"0.5074118",
"0.50586987",
"0.50229686",
"0.5008486",
"0.50062305",
"0.4979753",
"0.49586645",
"0.49495217",
"0.49489045",
"0.49459234",
"0.49193344",
"0.49099472",
"0.49089786",
"0.4908575"
] | 0.8526851 | 0 |
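CallSkipMod above depends on module-level state (var.MOD_LOCATION, the fl file table, and the log wrapper) that is not part of this record. A self-contained sketch of the same skip-warning pattern, with every name below chosen as a placeholder rather than the project's actual API, could be:

import logging

log = logging.getLogger("mod_parser")

def call_skip_mod(mod, file_template, mod_locations):
    # Resolve a human-readable file name, then warn that the mod was skipped.
    file = file_template.format(1) if "{0}" in file_template else file_template
    where = "one folder" if len(mod_locations) == 1 else "any of several folders"
    log.warning("skipping mod %r: file %r not found in %s (%s)",
                mod, file, where, ", ".join(mod_locations))
    return 0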
ARN of an App Runner automatic scaling configuration resource that you want to associate with your service. If not provided, App Runner associates the latest revision of a default auto scaling configuration. | def auto_scaling_configuration_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "auto_scaling_configuration_arn") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auto_scaling_configuration_arn(self) -> Optional[str]:\n return pulumi.get(self, \"auto_scaling_configuration_arn\")",
"def auto_scaling_configuration_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auto_scaling_configuration_arn\")",
"def autoscaling(self) -> Optional[pulumi.Input['NodePoolAutoscalingArgs']]:\n return pulumi.get(self, \"autoscaling\")",
"def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")",
"def auto_scaling(self):\n return self.container['auto_scaling']",
"def get_auto_scaling_configuration_output(auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAutoScalingConfigurationResult]:\n ...",
"def auto_scaling(self, auto_scaling):\n\n self.container['auto_scaling'] = auto_scaling",
"def generate_arn(self):\n if self._generate_arn is None:\n self._generate_arn = functools.partial(\n generate_arn,\n self.resource_type.arn_service or self.resource_type.service,\n region=not self.resource_type.global_resource and self.config.region or \"\",\n account_id=self.account_id,\n resource_type=self.resource_type.arn_type,\n separator=self.resource_type.arn_separator)\n return self._generate_arn",
"def resource_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> Optional[str]:\n return pulumi.get(self, \"resource_arn\")",
"def set_time_based_auto_scaling(InstanceId=None, AutoScalingSchedule=None):\n pass",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_arn\")",
"def resource_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_arn\")",
"def get_auto_scaling_configuration(auto_scaling_configuration_arn: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAutoScalingConfigurationResult:\n __args__ = dict()\n __args__['autoScalingConfigurationArn'] = auto_scaling_configuration_arn\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('aws-native:apprunner:getAutoScalingConfiguration', __args__, opts=opts, typ=GetAutoScalingConfigurationResult).value\n\n return AwaitableGetAutoScalingConfigurationResult(\n auto_scaling_configuration_arn=pulumi.get(__ret__, 'auto_scaling_configuration_arn'),\n auto_scaling_configuration_revision=pulumi.get(__ret__, 'auto_scaling_configuration_revision'),\n latest=pulumi.get(__ret__, 'latest'))",
"def autoscale_labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationKubernetesAutoscaleLabelArgs']]]]:\n return pulumi.get(self, \"autoscale_labels\")",
"def automatic_resources(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelAutomaticResource']]:\n return pulumi.get(self, \"automatic_resources\")",
"def autoscale_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationEcsAutoscaleAttributeArgs']]]]:\n return pulumi.get(self, \"autoscale_attributes\")",
"def config_rule_arn(self) -> str:\n return pulumi.get(self, \"config_rule_arn\")",
"def config_rule_arn(self) -> str:\n return pulumi.get(self, \"config_rule_arn\")",
"def modify_aws_autoscaling(asg_name, action):\n logger.info('Modifying asg {} autoscaling to {} ...'.format(\n asg_name,\n action)\n )\n if not app_config['DRY_RUN']:\n\n if action == \"suspend\":\n response = client.suspend_processes(\n AutoScalingGroupName=asg_name,\n ScalingProcesses=['Launch', 'ReplaceUnhealthy'])\n elif action == \"resume\":\n response = client.resume_processes(\n AutoScalingGroupName=asg_name,\n ScalingProcesses=['Launch', 'ReplaceUnhealthy'])\n else:\n logger.info('Invalid scaling option')\n raise Exception('Invalid scaling option')\n\n if response['ResponseMetadata']['HTTPStatusCode'] != requests.codes.ok:\n logger.info('AWS asg modification operation did not succeed. Exiting.')\n raise Exception('AWS asg modification operation did not succeed. Exiting.')\n else:\n logger.info('Skipping asg modification due to dry run flag set')\n response = {'message': 'dry run only'}\n\n return response",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,\n health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,\n instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,\n network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,\n observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...",
"def vertical_pod_autoscaling(self) -> 'outputs.VerticalPodAutoscalingResponse':\n return pulumi.get(self, \"vertical_pod_autoscaling\")",
"def autoscale_down(self) -> Optional[pulumi.Input['OceanAutoscalerAutoscaleDownArgs']]:\n return pulumi.get(self, \"autoscale_down\")",
"def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec']]:\n return pulumi.get(self, \"autoscaling_metric_specs\")"
] | [
"0.7602388",
"0.73555046",
"0.6526771",
"0.6416983",
"0.6149178",
"0.6127391",
"0.5787056",
"0.55833757",
"0.55405587",
"0.5494746",
"0.54689515",
"0.5468072",
"0.5468072",
"0.5468072",
"0.5468072",
"0.5468072",
"0.5468072",
"0.5468072",
"0.54050505",
"0.5402634",
"0.53579885",
"0.5330335",
"0.5270906",
"0.5258441",
"0.5258441",
"0.5256211",
"0.5238288",
"0.51987034",
"0.5192533",
"0.51813185"
] | 0.7562741 | 1 |
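The auto_scaling_configuration_arn record above is an input of the App Runner Service resource in the pulumi_aws provider. A hedged sketch of wiring a custom scaling configuration into a service follows; argument names and the sample image assume that provider's apprunner module at the time of writing and may differ between versions, and plain dicts are used for the nested inputs:

import pulumi_aws as aws

asc = aws.apprunner.AutoScalingConfigurationVersion(
    "example-asc",
    auto_scaling_configuration_name="example",
    min_size=1,
    max_size=5,
    max_concurrency=50,
)

service = aws.apprunner.Service(
    "example-service",
    service_name="example",
    auto_scaling_configuration_arn=asc.arn,  # omit to fall back to the default revision
    source_configuration={
        "image_repository": {
            "image_identifier": "public.ecr.aws/aws-containers/hello-app-runner:latest",
            "image_repository_type": "ECR_PUBLIC",
        },
        "auto_deployments_enabled": False,
    },
)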
Settings of the health check that AWS App Runner performs to monitor the health of your service. See Health Check Configuration below for more details. | def health_check_configuration(self) -> Optional[pulumi.Input['ServiceHealthCheckConfigurationArgs']]:
return pulumi.get(self, "health_check_configuration") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def health_check_configuration(self) -> pulumi.Output['outputs.ServiceHealthCheckConfiguration']:\n return pulumi.get(self, \"health_check_configuration\")",
"def healthcheck(parameters): \n\n print(\"In healthcheck module\")",
"def health_check():\n app.logger.info(\"Health Check!\")\n return Response(\"All Good!\", status=200)",
"async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()",
"def health_check():\n return dict(api_status='OK')",
"def _healthcheck():\n return '', 200",
"def health_check():\n # TODO: implement any other checking logic.\n return '', 200",
"def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()",
"def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")",
"def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def health_check():\n ret = {\"Status\": 200, \"Msg\": \"Service is Up\"}\n return jsonify(ret)",
"def get_health_check(self):\n return util.create_response(output=\"OK\")",
"def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc",
"def index():\n logging.debug('Healthy check.')\n pass # healthy check",
"def index():\n logging.debug('Healthy check.')\n pass # healthy check",
"def healthcheck():\n return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)",
"def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self, *, scope: Scope) -> HealthCheckStatus:",
"def health_check(request):\n return Response(\"OK\",\n status=status.HTTP_200_OK)",
"def test_health(self) -> None:\n self._response = self._app.get('/health')\n\n self.assertEqual(self._response.status, '200 OK')",
"def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt {retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}",
"def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')",
"def health(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.health)"
] | [
"0.68419075",
"0.6347665",
"0.62836397",
"0.61189044",
"0.6003582",
"0.58736956",
"0.5865395",
"0.5828682",
"0.58054304",
"0.58054304",
"0.5802205",
"0.57770216",
"0.5776012",
"0.5757377",
"0.57560873",
"0.57560873",
"0.5734575",
"0.57197356",
"0.5709717",
"0.5709717",
"0.5709717",
"0.5709717",
"0.5709717",
"0.5709717",
"0.5699225",
"0.5693489",
"0.56704533",
"0.56331545",
"0.5601824",
"0.55552685"
] | 0.67506576 | 1 |
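A similar hedged sketch for the health check settings described above, again assuming the pulumi_aws apprunner module and dict-style nested inputs:

import pulumi_aws as aws

service = aws.apprunner.Service(
    "example-service",
    service_name="example",
    source_configuration={
        "image_repository": {
            "image_identifier": "public.ecr.aws/aws-containers/hello-app-runner:latest",
            "image_repository_type": "ECR_PUBLIC",
        },
        "auto_deployments_enabled": False,
    },
    health_check_configuration={
        "protocol": "HTTP",  # App Runner defaults to TCP when omitted
        "path": "/",
        "interval": 5,
        "timeout": 2,
        "healthy_threshold": 1,
        "unhealthy_threshold": 3,
    },
)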
Configuration settings related to network traffic of the web application that the App Runner service runs. See Network Configuration below for more details. | def network_configuration(self) -> Optional[pulumi.Input['ServiceNetworkConfigurationArgs']]:
return pulumi.get(self, "network_configuration") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def network_settings(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['NetworkSettings']",
"def network_configuration(self) -> pulumi.Output['outputs.ServiceNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def network_configurations(self) -> Sequence['outputs.ApplianceNetworkConfigurationResponse']:\n return pulumi.get(self, \"network_configurations\")",
"def network_configurations(self) -> Sequence['outputs.ApplianceNetworkConfigurationResponse']:\n return pulumi.get(self, \"network_configurations\")",
"def network_performance_config(self) -> Optional[pulumi.Input['NetworkPerformanceConfigArgs']]:\n return pulumi.get(self, \"network_performance_config\")",
"def app_settings():\n return {\n 'app_wksp_path': os.path.join(App.get_app_workspace().path, ''),\n 'threddsdatadir': App.get_custom_setting(\"thredds_path\"),\n 'threddsurl': App.get_custom_setting(\"thredds_url\"),\n 'logfile': os.path.join(App.get_app_workspace().path, 'workflow.log')\n }",
"def network_config(self) -> Optional[pulumi.Input['NodeNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> pulumi.Input['PrivateCloudNetworkConfigArgs']:\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> pulumi.Output['outputs.PrivateCloudNetworkConfig']:\n return pulumi.get(self, \"network_config\")",
"def __init__(self) -> None:\n\n self.config_keys = ['APPS_HOST', 'APPS_PORT']\n super().__init__()\n\n self.APPS_HOST = str(self.APPS_HOST)\n \"\"\"Host where the server will be served\"\"\"\n\n self.APPS_PORT = int(self.APPS_PORT)\n \"\"\"Port where the server will be served\"\"\"",
"def config(self):\n self._resource_manager = self._api._ixnetwork.ResourceManager\n self._ixn_vport = self._api._vport\n self._delete_vports()\n self._create_vports()\n self._create_capture()\n self._set_location()\n self._set_layer1()",
"def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION))",
"def network_config(self) -> Optional[pulumi.Input['PrivateCloudNetworkConfigArgs']]:\n return pulumi.get(self, \"network_config\")",
"def get_default_config(self):\n config = super(BindCollector, self).get_default_config()\n config.update({\n 'host': 'localhost',\n 'port': 8080,\n 'path': 'bind',\n # Available stats:\n # - resolver (Per-view resolver and cache statistics)\n # - server (Incoming requests and their answers)\n # - zonemgmt (Requests/responses related to zone management)\n # - sockets (Socket statistics)\n # - memory (Global memory usage)\n 'publish': [\n 'resolver',\n 'server',\n 'zonemgmt',\n 'sockets',\n 'memory',\n ],\n # By default we don't publish these special views\n 'publish_view_bind': False,\n 'publish_view_meta': False,\n })\n return config",
"def network_configuration(self) -> Optional['outputs.ScheduleTargetEcsParametersNetworkConfiguration']:\n return pulumi.get(self, \"network_configuration\")",
"def network_config(args): # pylint: disable-msg=W0613\n if not NETLOCK.acquire_read(NET_LOCK_TIMEOUT):\n raise HttpReqError(503, \"unable to take NETLOCK for reading after %s seconds\" % NET_LOCK_TIMEOUT)\n try:\n netconf = xivo_config.load_current_configuration()\n return yaml_json.stringify_keys(netconf)\n finally:\n NETLOCK.release()",
"def configure_httpd_service_ipa_conf(self):\n raise NotImplementedError()",
"def setup_net(self):\n pass",
"def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)",
"def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)",
"def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}",
"def runtime_config(self) -> str:\n return self._node[\"app_data\"].get(\"runtime_config\")",
"async def index_controller(self, request):\n return {\"port\": self.config.http_port, \"ip\": self.config.http_ip}",
"def project_settings(request):\n webnode_settings = kakocase_settings(request)\n webnode_settings['settings']['IS_WEBNODE'] = True\n return webnode_settings",
"def configuration():",
"def networks(self): # type: () -> t.Optional[t.Dict[str, t.Dict[str, t.Any]]]\n return self.network_settings.get('Networks')",
"def ports(self):\n return self.attrs.get('NetworkSettings', {}).get('Ports', {})",
"def port(self):\n return 5000"
] | [
"0.6015695",
"0.59456706",
"0.5870308",
"0.5870308",
"0.5752671",
"0.5752671",
"0.56932443",
"0.5645092",
"0.5629313",
"0.5614321",
"0.5545634",
"0.55261374",
"0.5504293",
"0.5500942",
"0.54950154",
"0.54877526",
"0.54553556",
"0.5371222",
"0.5335917",
"0.5307572",
"0.5297308",
"0.5288836",
"0.5280086",
"0.52652663",
"0.52610016",
"0.52499044",
"0.5220934",
"0.52102673",
"0.5207281",
"0.5197148"
] | 0.6002301 | 1 |
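For the network settings described above, a hedged pulumi_aws sketch that keeps the service publicly reachable and routes outbound traffic over the default public path; the ingress/egress attribute names are assumptions based on that provider and may vary by version:

import pulumi_aws as aws

service = aws.apprunner.Service(
    "example-service",
    service_name="example",
    source_configuration={
        "image_repository": {
            "image_identifier": "public.ecr.aws/aws-containers/hello-app-runner:latest",
            "image_repository_type": "ECR_PUBLIC",
        },
        "auto_deployments_enabled": False,
    },
    network_configuration={
        "ingress_configuration": {"is_publicly_accessible": True},
        # Use "VPC" plus a vpc_connector_arn instead for private egress.
        "egress_configuration": {"egress_type": "DEFAULT"},
    },
)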
The observability configuration of your service. See Observability Configuration below for more details. | def observability_configuration(self) -> Optional[pulumi.Input['ServiceObservabilityConfigurationArgs']]:
return pulumi.get(self, "observability_configuration") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def observability_configuration(self) -> pulumi.Output[Optional['outputs.ServiceObservabilityConfiguration']]:\n return pulumi.get(self, \"observability_configuration\")",
"def fleetobservability(self) -> Optional['outputs.FeatureSpecFleetobservability']:\n return pulumi.get(self, \"fleetobservability\")",
"def config(self):\n raise NotImplementedError",
"def definition_of_services(self):\r\n return True",
"def getConfiguration(self):\n raise NotImplementedError",
"def config(self):\n return self.namespace['config']",
"def logging_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfig']:\n return pulumi.get(self, \"logging_config\")",
"def config(self):\n pass",
"def config(self):\n pass",
"def get_observing_sites(self):\n pass",
"def service(self):\n pass",
"def observ(self):\n return self._observ.read_value()",
"def config(self):\n return None",
"def configuration():",
"def config(self):\r\n return self._config",
"def config(self) -> Any:\n return self._config",
"def monitoring_config(self) -> 'outputs.MonitoringConfigResponse':\n return pulumi.get(self, \"monitoring_config\")",
"def config(self):\n return self._config",
"def Get():\n return ServiceConfig() # Singleton decorator ensures there's only one",
"def get_config_spec(cls):\n return False",
"def get_default_config(self):\n config = super(BindCollector, self).get_default_config()\n config.update({\n 'host': 'localhost',\n 'port': 8080,\n 'path': 'bind',\n # Available stats:\n # - resolver (Per-view resolver and cache statistics)\n # - server (Incoming requests and their answers)\n # - zonemgmt (Requests/responses related to zone management)\n # - sockets (Socket statistics)\n # - memory (Global memory usage)\n 'publish': [\n 'resolver',\n 'server',\n 'zonemgmt',\n 'sockets',\n 'memory',\n ],\n # By default we don't publish these special views\n 'publish_view_bind': False,\n 'publish_view_meta': False,\n })\n return config",
"def config(self):\n return self.__config",
"def config(self):\n return self.__config",
"def config(self) -> ServerConfig:\n return self._config",
"def op_config(self) -> Any:\n return self.solid_config",
"def config(self, request):\n config = OtterConfig(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return config.app.resource()",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def config(self):\n return self._config",
"def config(self):\n return self._config",
"def config(self):\n return self._config"
] | [
"0.7956246",
"0.56444937",
"0.531778",
"0.52681047",
"0.52177036",
"0.51991004",
"0.5184514",
"0.51248384",
"0.51248384",
"0.51227665",
"0.51139295",
"0.5072489",
"0.5047751",
"0.49704665",
"0.49501875",
"0.4946865",
"0.4931813",
"0.49101347",
"0.48935717",
"0.48596627",
"0.48471427",
"0.4833268",
"0.4833268",
"0.48131076",
"0.48034406",
"0.48005727",
"0.47983712",
"0.47911292",
"0.47911292",
"0.47911292"
] | 0.81733936 | 0 |
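For the observability settings described above, a hedged pulumi_aws sketch that creates an X-Ray tracing configuration and attaches it to the service (resource and attribute names assume that provider's apprunner module):

import pulumi_aws as aws

obs = aws.apprunner.ObservabilityConfiguration(
    "example-obs",
    observability_configuration_name="example",
    trace_configuration={"vendor": "AWSXRAY"},
)

service = aws.apprunner.Service(
    "example-service",
    service_name="example",
    source_configuration={
        "image_repository": {
            "image_identifier": "public.ecr.aws/aws-containers/hello-app-runner:latest",
            "image_repository_type": "ECR_PUBLIC",
        },
        "auto_deployments_enabled": False,
    },
    observability_configuration={
        "observability_enabled": True,
        "observability_configuration_arn": obs.arn,
    },
)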
The runtime configuration of instances (scaling units) of the App Runner service. See Instance Configuration below for more details. | def instance_configuration(self) -> Optional[pulumi.Input['ServiceInstanceConfigurationArgs']]:
return pulumi.get(self, "instance_configuration") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auto_scaling(self):\n return self.container['auto_scaling']",
"def instance_configuration(self) -> pulumi.Output['outputs.ServiceInstanceConfiguration']:\n return pulumi.get(self, \"instance_configuration\")",
"def runtime_config(self) -> str:\n return self._node[\"app_data\"].get(\"runtime_config\")",
"def autoscaling(self) -> Optional[pulumi.Input['NodePoolAutoscalingArgs']]:\n return pulumi.get(self, \"autoscaling\")",
"def configuration(self):\n\t\n limits = dict(\n \t cpu = resource.getrlimit(resource.RLIMIT_CPU)[0],\n\t memory = resource.getrlimit(resource.RLIMIT_AS)[0],\n disk = resource.getrlimit(resource.RLIMIT_FSIZE)[0]\n )\n\t\n\ttmpdir = os.getenv('TMPDIR')\n\tif tmpdir:\n\t tag = os.path.basename(tmpdir)\n\t jobid, taskid, queue = tag.split('.')\n\telse:\n\t jobid = taskid = queue = None\n\t\n\tworkdir = os.getenv('SGE_O_WORKDIR')\n\tif not workdir:\n\t workdir = os.getcwd()\n\t\n\t# Get the real time limit.\n\tif queue is None:\n\t limits['time'] = None\n\telse:\n\t command = \"qconf -sq pa_medium | grep s_rt\"\n\t pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\t time = map(float, stdout.split()[1].split(':'))\n\t time = (time[0]*60.+time[1])*60.+time[2]\n\t limits['time'] = time \n\t\n\treturn dict(\n\t host = os.getenv('HOSTNAME'),\n\t jobid = jobid,\n\t taskid = taskid,\n\t queue = queue,\n\t limits = limits,\n\t tmpdir = tmpdir,\n\t workdir = workdir \n\t)",
"def list_runtimes(self, docker_image_name='all'):\n logger.debug('Listing runtimes')\n logger.debug('Note that k8s job backend does not manage runtimes')\n return []",
"def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' + context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}",
"def runtime(self) -> str:\n return self._node[\"app_data\"].get(\"runtime\")",
"def list_runtimes(config, backend, debug):\n log_level = logging.INFO if not debug else logging.DEBUG\n setup_lithops_logger(log_level)\n\n if config:\n config = load_yaml_config(config)\n\n config_ow = set_config_ow(backend, runtime_name='None')\n config = default_config(config, config_ow, load_storage_config=False)\n\n if config['lithops']['mode'] != SERVERLESS:\n raise Exception('\"lithops runtime list\" command is only valid for serverless backends')\n\n compute_config = extract_serverless_config(config)\n compute_handler = ServerlessHandler(compute_config, None)\n runtimes = compute_handler.list_runtimes()\n\n if runtimes:\n width = max([len(runtime[0]) for runtime in runtimes])\n\n print('\\n{:{width}} \\t {}'.format('Runtime Name', 'Memory Size (MB)', width=width))\n print('-' * width, '\\t', '-' * 20)\n for runtime in runtimes:\n name = runtime[0]\n mem = runtime[1]\n print('{:{width}} \\t {}'.format(name, mem, width=width))\n print()\n print('Total runtimes: {}'.format(len(runtimes)))\n else:\n width = 10\n print('\\n{:{width}} \\t {}'.format('Runtime Name', 'Memory Size (MB)', width=width))\n print('-' * width, '\\t', '-' * 20)\n print('\\nNo runtimes deployed')",
"def generate(self):\n fleet_config = self._build_base_object()\n fleet_config['LaunchSpecifications'] = list(self._build_launch_specs_object())\n return fleet_config",
"def describe_time_based_auto_scaling(InstanceIds=None):\n pass",
"def runtime(self):\n return self._runtime",
"def runtime(self):\n return self._runtime",
"def autoscaling_metric_specs(self) -> Optional[Sequence['outputs.AiEndpointDeployedModelDedicatedResourceAutoscalingMetricSpec']]:\n return pulumi.get(self, \"autoscaling_metric_specs\")",
"def workload_runtime(self) -> Optional[pulumi.Input[Union[str, 'WorkloadRuntime']]]:\n return pulumi.get(self, \"workload_runtime\")",
"def __init__(__self__, *,\n additional_info: Optional[pulumi.Input[str]] = None,\n affinity: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n block_device_mappings: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceMappingArgs']]]] = None,\n cpu_options: Optional[pulumi.Input['InstanceCpuOptionsArgs']] = None,\n credit_specification: Optional[pulumi.Input['InstanceCreditSpecificationArgs']] = None,\n disable_api_termination: Optional[pulumi.Input[bool]] = None,\n ebs_optimized: Optional[pulumi.Input[bool]] = None,\n elastic_gpu_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceElasticGpuSpecificationArgs']]]] = None,\n elastic_inference_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceElasticInferenceAcceleratorArgs']]]] = None,\n enclave_options: Optional[pulumi.Input['InstanceEnclaveOptionsArgs']] = None,\n hibernation_options: Optional[pulumi.Input['InstanceHibernationOptionsArgs']] = None,\n host_id: Optional[pulumi.Input[str]] = None,\n host_resource_group_arn: Optional[pulumi.Input[str]] = None,\n iam_instance_profile: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n instance_initiated_shutdown_behavior: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ipv6_address_count: Optional[pulumi.Input[int]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpv6AddressArgs']]]] = None,\n kernel_id: Optional[pulumi.Input[str]] = None,\n key_name: Optional[pulumi.Input[str]] = None,\n launch_template: Optional[pulumi.Input['InstanceLaunchTemplateSpecificationArgs']] = None,\n license_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceLicenseSpecificationArgs']]]] = None,\n monitoring: Optional[pulumi.Input[bool]] = None,\n network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceNetworkInterfaceArgs']]]] = None,\n placement_group_name: Optional[pulumi.Input[str]] = None,\n private_dns_name_options: Optional[pulumi.Input['InstancePrivateDnsNameOptionsArgs']] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n propagate_tags_to_volume_on_creation: Optional[pulumi.Input[bool]] = None,\n ramdisk_id: Optional[pulumi.Input[str]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_dest_check: Optional[pulumi.Input[bool]] = None,\n ssm_associations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSsmAssociationArgs']]]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceTagArgs']]]] = None,\n tenancy: Optional[pulumi.Input[str]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n volumes: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceVolumeArgs']]]] = None):\n if additional_info is not None:\n pulumi.set(__self__, \"additional_info\", additional_info)\n if affinity is not None:\n pulumi.set(__self__, \"affinity\", affinity)\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if block_device_mappings is not None:\n pulumi.set(__self__, \"block_device_mappings\", block_device_mappings)\n if cpu_options is not None:\n pulumi.set(__self__, \"cpu_options\", cpu_options)\n if credit_specification is not None:\n pulumi.set(__self__, \"credit_specification\", credit_specification)\n if 
disable_api_termination is not None:\n pulumi.set(__self__, \"disable_api_termination\", disable_api_termination)\n if ebs_optimized is not None:\n pulumi.set(__self__, \"ebs_optimized\", ebs_optimized)\n if elastic_gpu_specifications is not None:\n pulumi.set(__self__, \"elastic_gpu_specifications\", elastic_gpu_specifications)\n if elastic_inference_accelerators is not None:\n pulumi.set(__self__, \"elastic_inference_accelerators\", elastic_inference_accelerators)\n if enclave_options is not None:\n pulumi.set(__self__, \"enclave_options\", enclave_options)\n if hibernation_options is not None:\n pulumi.set(__self__, \"hibernation_options\", hibernation_options)\n if host_id is not None:\n pulumi.set(__self__, \"host_id\", host_id)\n if host_resource_group_arn is not None:\n pulumi.set(__self__, \"host_resource_group_arn\", host_resource_group_arn)\n if iam_instance_profile is not None:\n pulumi.set(__self__, \"iam_instance_profile\", iam_instance_profile)\n if image_id is not None:\n pulumi.set(__self__, \"image_id\", image_id)\n if instance_initiated_shutdown_behavior is not None:\n pulumi.set(__self__, \"instance_initiated_shutdown_behavior\", instance_initiated_shutdown_behavior)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ipv6_address_count is not None:\n pulumi.set(__self__, \"ipv6_address_count\", ipv6_address_count)\n if ipv6_addresses is not None:\n pulumi.set(__self__, \"ipv6_addresses\", ipv6_addresses)\n if kernel_id is not None:\n pulumi.set(__self__, \"kernel_id\", kernel_id)\n if key_name is not None:\n pulumi.set(__self__, \"key_name\", key_name)\n if launch_template is not None:\n pulumi.set(__self__, \"launch_template\", launch_template)\n if license_specifications is not None:\n pulumi.set(__self__, \"license_specifications\", license_specifications)\n if monitoring is not None:\n pulumi.set(__self__, \"monitoring\", monitoring)\n if network_interfaces is not None:\n pulumi.set(__self__, \"network_interfaces\", network_interfaces)\n if placement_group_name is not None:\n pulumi.set(__self__, \"placement_group_name\", placement_group_name)\n if private_dns_name_options is not None:\n pulumi.set(__self__, \"private_dns_name_options\", private_dns_name_options)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if propagate_tags_to_volume_on_creation is not None:\n pulumi.set(__self__, \"propagate_tags_to_volume_on_creation\", propagate_tags_to_volume_on_creation)\n if ramdisk_id is not None:\n pulumi.set(__self__, \"ramdisk_id\", ramdisk_id)\n if security_group_ids is not None:\n pulumi.set(__self__, \"security_group_ids\", security_group_ids)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if source_dest_check is not None:\n pulumi.set(__self__, \"source_dest_check\", source_dest_check)\n if ssm_associations is not None:\n pulumi.set(__self__, \"ssm_associations\", ssm_associations)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tenancy is not None:\n pulumi.set(__self__, \"tenancy\", tenancy)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)\n if volumes is not None:\n pulumi.set(__self__, \"volumes\", volumes)",
"def scaling_config(self) -> Optional[pulumi.Input['EventSourceMappingScalingConfigArgs']]:\n return pulumi.get(self, \"scaling_config\")",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = autoscale.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_autoscaling_instances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.health_status == 'HEALTHY'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def list_runtimes(self, runtime_name='all'):\n return self.compute_handler.list_runtimes(runtime_name)",
"def scaling_config(self) -> pulumi.Output[Optional['outputs.EventSourceMappingScalingConfig']]:\n return pulumi.get(self, \"scaling_config\")",
"def set_time_based_auto_scaling(InstanceId=None, AutoScalingSchedule=None):\n pass",
"def run_configuration(self) -> Optional[pulumi.Input['ApplicationApplicationConfigurationRunConfigurationArgs']]:\n return pulumi.get(self, \"run_configuration\")",
"def runtime(self) -> str:\n return self._runtime",
"def running_config(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"running_config\"), kwargs)",
"def scaling(self) -> Optional['outputs.AiFeatureStoreOnlineServingConfigScaling']:\n return pulumi.get(self, \"scaling\")",
"def get_config():\n config = ml_collections.ConfigDict()\n\n # Which model to use -- see ./models.py\n config.model_name = 'ViT-B_32'\n # Where to store training logs.\n config.log_dir = '.'\n\n # Number of steps to measure.\n config.steps = 30\n # Number of steps before measuring.\n config.initial_steps = 10\n\n # Batch size\n config.batch = 0\n # Number of output classes.\n config.num_classes = 0\n # Image size (width=height).\n config.image_size = 0\n\n config.train = 'inference_time'\n\n return config",
"def run_instances(self, image_id, min_count=1, max_count=1,\r\n key_name=None, security_groups=None,\r\n user_data=None, addressing_type=None,\r\n instance_type='m1.small', placement=None,\r\n kernel_id=None, ramdisk_id=None,\r\n monitoring_enabled=False, subnet_id=None,\r\n block_device_map=None,\r\n disable_api_termination=False,\r\n instance_initiated_shutdown_behavior=None,\r\n private_ip_address=None,\r\n placement_group=None, client_token=None,\r\n security_group_ids=None):\r\n params = {'ImageId':image_id,\r\n 'MinCount':min_count,\r\n 'MaxCount': max_count}\r\n if key_name:\r\n params['KeyName'] = key_name\r\n if security_group_ids:\r\n l = []\r\n for group in security_group_ids:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroupId')\r\n if security_groups:\r\n l = []\r\n for group in security_groups:\r\n if isinstance(group, SecurityGroup):\r\n l.append(group.name)\r\n else:\r\n l.append(group)\r\n self.build_list_params(params, l, 'SecurityGroup')\r\n if user_data:\r\n params['UserData'] = base64.b64encode(user_data)\r\n if addressing_type:\r\n params['AddressingType'] = addressing_type\r\n if instance_type:\r\n params['InstanceType'] = instance_type\r\n if placement:\r\n params['Placement.AvailabilityZone'] = placement\r\n if placement_group:\r\n params['Placement.GroupName'] = placement_group\r\n if kernel_id:\r\n params['KernelId'] = kernel_id\r\n if ramdisk_id:\r\n params['RamdiskId'] = ramdisk_id\r\n if monitoring_enabled:\r\n params['Monitoring.Enabled'] = 'true'\r\n if subnet_id:\r\n params['SubnetId'] = subnet_id\r\n if private_ip_address:\r\n params['PrivateIpAddress'] = private_ip_address\r\n if block_device_map:\r\n block_device_map.build_list_params(params)\r\n if disable_api_termination:\r\n params['DisableApiTermination'] = 'true'\r\n if instance_initiated_shutdown_behavior:\r\n val = instance_initiated_shutdown_behavior\r\n params['InstanceInitiatedShutdownBehavior'] = val\r\n if client_token:\r\n params['ClientToken'] = client_token\r\n return self.get_object('RunInstances', params, Reservation, verb='POST')",
"def get_instance_costs_for_config(runhis: RunHistory, config: Configuration):\n config_id = runhis.config_ids.get(config)\n runs_ = runhis._configid_to_inst_seed.get(config_id, [])\n cost_per_inst = {}\n for inst, seed in runs_:\n cost_per_inst[inst] = cost_per_inst.get(inst, [])\n rkey = RunKey(config_id, inst, seed)\n vkey = runhis.data[rkey]\n cost_per_inst[inst].append(vkey.cost)\n cost_per_inst = dict([(inst, np.mean(costs)) for inst, costs in cost_per_inst.items()])\n return cost_per_inst",
"def _configure_all_tasks(self, config, job_exe, job_type):\n\n config.set_task_ids(job_exe.get_cluster_id())\n\n for task_type in config.get_task_types():\n # Configure env vars describing allocated task resources\n env_vars = {}\n nvidia_docker_label = None\n\n for resource in config.get_resources(task_type).resources:\n env_name = 'ALLOCATED_%s' % normalize_env_var_name(resource.name)\n env_vars[env_name] = '%.1f' % resource.value # Assumes scalar resources\n if resource.name == \"gpus\" and int(resource.value) > 0:\n gpu_list = GPUManager.get_nvidia_docker_label(job_exe.node_id, job_exe.job_id)\n nvidia_docker_label = DockerParameter('env','NVIDIA_VISIBLE_DEVICES={}'.format(gpu_list.strip(',')))\n\n # Configure env vars for Scale meta-data\n env_vars['SCALE_JOB_ID'] = unicode(job_exe.job_id)\n env_vars['SCALE_EXE_NUM'] = unicode(job_exe.exe_num)\n if job_exe.recipe_id:\n env_vars['SCALE_RECIPE_ID'] = unicode(job_exe.recipe_id)\n if job_exe.batch_id:\n env_vars['SCALE_BATCH_ID'] = unicode(job_exe.batch_id)\n\n # Configure workspace volumes\n workspace_volumes = {}\n for task_workspace in config.get_workspaces(task_type):\n logger.debug(self._workspaces)\n workspace_model = self._workspaces[task_workspace.name]\n # TODO: Should refactor workspace broker to return a Volume object and remove BrokerVolume\n if workspace_model.volume:\n vol_name = get_workspace_volume_name(job_exe, task_workspace.name)\n cont_path = get_workspace_volume_path(workspace_model.name)\n if workspace_model.volume.host:\n host_path = workspace_model.volume.remote_path\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=True, host_path=host_path)\n else:\n driver = workspace_model.volume.driver\n driver_opts = {}\n # TODO: Hack alert for nfs broker, as stated above, we should return Volume from broker\n if driver == 'nfs':\n driver_opts = {'share': workspace_model.volume.remote_path}\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=False, driver=driver,\n driver_opts=driver_opts)\n workspace_volumes[task_workspace.name] = volume\n\n config.add_to_task(task_type, env_vars=env_vars, wksp_volumes=workspace_volumes)\n\n # Labels for metric grouping\n job_id_label = DockerParameter('label', 'scale-job-id={}'.format(job_exe.job_id))\n job_execution_id_label = DockerParameter('label', 'scale-job-execution-id={}'.format(job_exe.exe_num))\n job_type_name_label = DockerParameter('label', 'scale-job-type-name={}'.format(job_type.name))\n job_type_version_label = DockerParameter('label', 'scale-job-type-version={}'.format(job_type.version))\n main_label = DockerParameter('label', 'scale-task-type=main')\n if nvidia_docker_label:\n nvidia_runtime_param = DockerParameter('runtime', 'nvidia')\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label, nvidia_docker_label, nvidia_runtime_param])\n else:\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label])\n\n if not job_type.is_system:\n pre_label = DockerParameter('label', 'scale-task-type=pre')\n post_label = DockerParameter('label', 'scale-task-type=post')\n config.add_to_task('pre', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, pre_label])\n config.add_to_task('post', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, post_label])\n\n # Configure tasks for logging\n if 
settings.LOGGING_ADDRESS is not None:\n log_driver = DockerParameter('log-driver', 'fluentd')\n fluent_precision = DockerParameter('log-opt', 'fluentd-sub-second-precision=true')\n log_address = DockerParameter('log-opt', 'fluentd-address=%s' % settings.LOGGING_ADDRESS)\n if not job_type.is_system:\n pre_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('pre'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('pre', docker_params=[log_driver, fluent_precision, log_address, pre_task_tag])\n post_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('post'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('post', docker_params=[log_driver, fluent_precision, log_address, post_task_tag])\n # TODO: remove es_urls parameter when Scale no longer supports old style job types\n\n # Post task needs ElasticSearch URL to grab logs for old artifact registration\n es_param = DockerParameter('env', 'ELASTICSEARCH_URL=%s' % settings.ELASTICSEARCH_URL)\n config.add_to_task('post', docker_params=[es_param])\n main_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('main'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('main', docker_params=[log_driver, fluent_precision, log_address, main_task_tag])",
"def create_node(self, **kwargs):\n image = kwargs[\"image\"]\n size = kwargs[\"size\"]\n params = {\n 'Action': 'RunInstances',\n 'ImageId': image.id,\n 'MinCount': kwargs.get('ex_mincount','1'),\n 'MaxCount': kwargs.get('ex_maxcount','1'),\n 'InstanceType': size.id\n }\n\n if 'ex_securitygroup' in kwargs:\n if not isinstance(kwargs['ex_securitygroup'], list):\n kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']]\n for sig in range(len(kwargs['ex_securitygroup'])):\n params['SecurityGroup.%d' % (sig+1,)] = kwargs['ex_securitygroup'][sig]\n\n if 'location' in kwargs:\n availability_zone = getattr(kwargs['location'], 'availability_zone',\n None)\n if availability_zone:\n if availability_zone.region_name != self.region_name:\n raise AttributeError('Invalid availability zone: %s'\n % (availability_zone.name))\n params['Placement.AvailabilityZone'] = availability_zone.name\n\n if 'ex_keyname' in kwargs:\n params['KeyName'] = kwargs['ex_keyname']\n\n if 'ex_userdata' in kwargs:\n params['UserData'] = base64.b64encode(kwargs['ex_userdata'])\n\n if 'ex_clienttoken' in kwargs:\n params['ClientToken'] = kwargs['ex_clienttoken']\n\n object = self.connection.request(self.path, params=params).object\n nodes = self._to_nodes(object, 'instancesSet/item')\n\n if len(nodes) == 1:\n return nodes[0]\n else:\n return nodes"
] | [
"0.6600584",
"0.6487364",
"0.64526594",
"0.6082388",
"0.583839",
"0.5779044",
"0.57518584",
"0.5666383",
"0.5536144",
"0.5521754",
"0.5508416",
"0.54588073",
"0.54588073",
"0.5430509",
"0.5426012",
"0.5424304",
"0.54210854",
"0.53770417",
"0.53453577",
"0.5336255",
"0.5292929",
"0.5286647",
"0.52768636",
"0.52607673",
"0.5243419",
"0.52293676",
"0.5213463",
"0.5194801",
"0.51881546",
"0.51795965"
] | 0.65064335 | 1 |
The observability configuration of your service. See Observability Configuration below for more details. | def observability_configuration(self) -> Optional[pulumi.Input['ServiceObservabilityConfigurationArgs']]:
return pulumi.get(self, "observability_configuration") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def observability_configuration(self) -> pulumi.Output[Optional['outputs.ServiceObservabilityConfiguration']]:\n return pulumi.get(self, \"observability_configuration\")",
"def fleetobservability(self) -> Optional['outputs.FeatureSpecFleetobservability']:\n return pulumi.get(self, \"fleetobservability\")",
"def config(self):\n raise NotImplementedError",
"def definition_of_services(self):\r\n return True",
"def getConfiguration(self):\n raise NotImplementedError",
"def config(self):\n return self.namespace['config']",
"def logging_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfig']:\n return pulumi.get(self, \"logging_config\")",
"def config(self):\n pass",
"def config(self):\n pass",
"def get_observing_sites(self):\n pass",
"def service(self):\n pass",
"def observ(self):\n return self._observ.read_value()",
"def config(self):\n return None",
"def configuration():",
"def config(self):\r\n return self._config",
"def config(self) -> Any:\n return self._config",
"def monitoring_config(self) -> 'outputs.MonitoringConfigResponse':\n return pulumi.get(self, \"monitoring_config\")",
"def config(self):\n return self._config",
"def Get():\n return ServiceConfig() # Singleton decorator ensures there's only one",
"def get_config_spec(cls):\n return False",
"def get_default_config(self):\n config = super(BindCollector, self).get_default_config()\n config.update({\n 'host': 'localhost',\n 'port': 8080,\n 'path': 'bind',\n # Available stats:\n # - resolver (Per-view resolver and cache statistics)\n # - server (Incoming requests and their answers)\n # - zonemgmt (Requests/responses related to zone management)\n # - sockets (Socket statistics)\n # - memory (Global memory usage)\n 'publish': [\n 'resolver',\n 'server',\n 'zonemgmt',\n 'sockets',\n 'memory',\n ],\n # By default we don't publish these special views\n 'publish_view_bind': False,\n 'publish_view_meta': False,\n })\n return config",
"def config(self):\n return self.__config",
"def config(self):\n return self.__config",
"def config(self) -> ServerConfig:\n return self._config",
"def op_config(self) -> Any:\n return self.solid_config",
"def config(self, request):\n config = OtterConfig(self.store, self.tenant_id, self.group_id,\n self.dispatcher)\n return config.app.resource()",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def config(self):\n return self._config",
"def config(self):\n return self._config",
"def config(self):\n return self._config"
] | [
"0.79563946",
"0.56439465",
"0.53201985",
"0.5269377",
"0.521966",
"0.52014226",
"0.518629",
"0.5127047",
"0.5127047",
"0.5122262",
"0.5114975",
"0.5073362",
"0.5050198",
"0.49730882",
"0.49526307",
"0.49493897",
"0.4934082",
"0.49124867",
"0.48943686",
"0.4861345",
"0.4848884",
"0.48353994",
"0.48353994",
"0.481506",
"0.4804777",
"0.48013067",
"0.48010242",
"0.47933275",
"0.47933275",
"0.47933275"
] | 0.8173864 | 1 |
Get an existing Service resource's state with the given name, id, and optional extra properties used to qualify the lookup. | def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,
encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,
health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,
instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,
network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,
observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,
service_id: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
service_url: Optional[pulumi.Input[str]] = None,
source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Service':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ServiceState.__new__(_ServiceState)
__props__.__dict__["arn"] = arn
__props__.__dict__["auto_scaling_configuration_arn"] = auto_scaling_configuration_arn
__props__.__dict__["encryption_configuration"] = encryption_configuration
__props__.__dict__["health_check_configuration"] = health_check_configuration
__props__.__dict__["instance_configuration"] = instance_configuration
__props__.__dict__["network_configuration"] = network_configuration
__props__.__dict__["observability_configuration"] = observability_configuration
__props__.__dict__["service_id"] = service_id
__props__.__dict__["service_name"] = service_name
__props__.__dict__["service_url"] = service_url
__props__.__dict__["source_configuration"] = source_configuration
__props__.__dict__["status"] = status
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return Service(resource_name, opts=opts, __props__=__props__) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"correlation_scheme\"] = None\n __props__.__dict__[\"default_move_cost\"] = None\n __props__.__dict__[\"etag\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"partition_description\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"service_dns_name\"] = None\n __props__.__dict__[\"service_kind\"] = None\n __props__.__dict__[\"service_load_metrics\"] = None\n __props__.__dict__[\"service_package_activation_mode\"] = None\n __props__.__dict__[\"service_placement_policies\"] = None\n __props__.__dict__[\"service_type_name\"] = None\n __props__.__dict__[\"system_data\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ServiceArgs.__new__(ServiceArgs)\n\n __props__.__dict__[\"capacity_provider_strategy\"] = None\n __props__.__dict__[\"cluster\"] = None\n __props__.__dict__[\"deployment_configuration\"] = None\n __props__.__dict__[\"deployment_controller\"] = None\n __props__.__dict__[\"desired_count\"] = None\n __props__.__dict__[\"enable_ecs_managed_tags\"] = None\n __props__.__dict__[\"enable_execute_command\"] = None\n __props__.__dict__[\"health_check_grace_period_seconds\"] = None\n __props__.__dict__[\"launch_type\"] = None\n __props__.__dict__[\"load_balancers\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"network_configuration\"] = None\n __props__.__dict__[\"placement_constraints\"] = None\n __props__.__dict__[\"placement_strategies\"] = None\n __props__.__dict__[\"platform_version\"] = None\n __props__.__dict__[\"propagate_tags\"] = None\n __props__.__dict__[\"role\"] = None\n __props__.__dict__[\"scheduling_strategy\"] = None\n __props__.__dict__[\"service_arn\"] = None\n __props__.__dict__[\"service_connect_configuration\"] = None\n __props__.__dict__[\"service_name\"] = None\n __props__.__dict__[\"service_registries\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"task_definition\"] = None\n return Service(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def __getattr__(self, service_id):\n\n try:\n return self.services[service_id]\n except KeyError:\n raise exceptions.ServiceNotFoundError(f'No service found with ID \"{service_id}\".', service_id)",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n additional_locations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceAdditionalLocationArgs']]]]] = None,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ServiceCertificateArgs']]]]] = None,\n client_certificate_enabled: Optional[pulumi.Input[bool]] = None,\n delegation: Optional[pulumi.Input[pulumi.InputType['ServiceDelegationArgs']]] = None,\n developer_portal_url: Optional[pulumi.Input[str]] = None,\n gateway_disabled: Optional[pulumi.Input[bool]] = None,\n gateway_regional_url: Optional[pulumi.Input[str]] = None,\n gateway_url: Optional[pulumi.Input[str]] = None,\n hostname_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHostnameConfigurationArgs']]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['ServiceIdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n management_api_url: Optional[pulumi.Input[str]] = None,\n min_api_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n notification_sender_email: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[pulumi.InputType['ServicePolicyArgs']]] = None,\n portal_url: Optional[pulumi.Input[str]] = None,\n private_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n protocols: Optional[pulumi.Input[pulumi.InputType['ServiceProtocolsArgs']]] = None,\n public_ip_address_id: Optional[pulumi.Input[str]] = None,\n public_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n public_network_access_enabled: Optional[pulumi.Input[bool]] = None,\n publisher_email: Optional[pulumi.Input[str]] = None,\n publisher_name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n scm_url: Optional[pulumi.Input[str]] = None,\n security: Optional[pulumi.Input[pulumi.InputType['ServiceSecurityArgs']]] = None,\n sign_in: Optional[pulumi.Input[pulumi.InputType['ServiceSignInArgs']]] = None,\n sign_up: Optional[pulumi.Input[pulumi.InputType['ServiceSignUpArgs']]] = None,\n sku_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tenant_access: Optional[pulumi.Input[pulumi.InputType['ServiceTenantAccessArgs']]] = None,\n virtual_network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceVirtualNetworkConfigurationArgs']]] = None,\n virtual_network_type: Optional[pulumi.Input[str]] = None,\n zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"additional_locations\"] = additional_locations\n __props__.__dict__[\"certificates\"] = certificates\n __props__.__dict__[\"client_certificate_enabled\"] = client_certificate_enabled\n __props__.__dict__[\"delegation\"] = delegation\n __props__.__dict__[\"developer_portal_url\"] = developer_portal_url\n __props__.__dict__[\"gateway_disabled\"] = gateway_disabled\n __props__.__dict__[\"gateway_regional_url\"] = gateway_regional_url\n __props__.__dict__[\"gateway_url\"] = gateway_url\n __props__.__dict__[\"hostname_configuration\"] = hostname_configuration\n __props__.__dict__[\"identity\"] = identity\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"management_api_url\"] = management_api_url\n 
__props__.__dict__[\"min_api_version\"] = min_api_version\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"notification_sender_email\"] = notification_sender_email\n __props__.__dict__[\"policy\"] = policy\n __props__.__dict__[\"portal_url\"] = portal_url\n __props__.__dict__[\"private_ip_addresses\"] = private_ip_addresses\n __props__.__dict__[\"protocols\"] = protocols\n __props__.__dict__[\"public_ip_address_id\"] = public_ip_address_id\n __props__.__dict__[\"public_ip_addresses\"] = public_ip_addresses\n __props__.__dict__[\"public_network_access_enabled\"] = public_network_access_enabled\n __props__.__dict__[\"publisher_email\"] = publisher_email\n __props__.__dict__[\"publisher_name\"] = publisher_name\n __props__.__dict__[\"resource_group_name\"] = resource_group_name\n __props__.__dict__[\"scm_url\"] = scm_url\n __props__.__dict__[\"security\"] = security\n __props__.__dict__[\"sign_in\"] = sign_in\n __props__.__dict__[\"sign_up\"] = sign_up\n __props__.__dict__[\"sku_name\"] = sku_name\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tenant_access\"] = tenant_access\n __props__.__dict__[\"virtual_network_configuration\"] = virtual_network_configuration\n __props__.__dict__[\"virtual_network_type\"] = virtual_network_type\n __props__.__dict__[\"zones\"] = zones\n return Service(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())",
"def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)",
"def statesById(state_id):\n obj = storage.get(State, state_id)\n if obj:\n return jsonify(obj.to_dict())\n return jsonify({\"error\": \"Not found\"}), 404",
"def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n __props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)",
"def __getitem__(self, service_id):\n\n try:\n return self.services[service_id]\n except KeyError:\n raise exceptions.ServiceNotFoundError(f'No service found with ID \"{service_id}\".', service_id)",
"def get_service(self, _id):\n\n job = UpstartJob(_id, bus=self.bus)\n svc = Service(self)\n svc.id = _id\n svc.name = self.__fix_name(_id)\n try:\n svc.state = job.get_status()['state']\n svc.running = svc.state == 'running'\n except Exception as e:\n svc.running = False\n return svc",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def service_with_region(self, region_name, service_id, base_uri):\n key = (region_name, service_id)\n if key in self.uri_prefixes:\n return self.uri_prefixes[key].resource_for_region(\n self.uri_for_service(region_name, service_id, base_uri))",
"def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def getService(name):\n return Service.getService(name)",
"def get_by_id(self, _id):\n return Field(self.context, ResourcePathServiceOperation(\"getById\", [_id], self.resource_path))",
"def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)",
"def _get_service(self, service_name):\n if self._service:\n return self._service\n res = self._cc.services().get_by_name(service_name, name='label')\n self._service = res.resource\n return self._service"
] | [
"0.6462436",
"0.6262246",
"0.5981988",
"0.5969614",
"0.5956969",
"0.59401536",
"0.5937153",
"0.59308213",
"0.59026575",
"0.5882669",
"0.5830883",
"0.58301145",
"0.5791222",
"0.57855755",
"0.57324743",
"0.5667298",
"0.56250405",
"0.5620358",
"0.559672",
"0.5583968",
"0.5560769",
"0.55323094",
"0.5529686",
"0.5512949",
"0.55086505",
"0.5489078",
"0.5473458",
"0.5461049",
"0.54544854",
"0.54358774"
] | 0.70002896 | 0 |
Settings of the health check that AWS App Runner performs to monitor the health of your service. See Health Check Configuration below for more details. | def health_check_configuration(self) -> pulumi.Output['outputs.ServiceHealthCheckConfiguration']:
return pulumi.get(self, "health_check_configuration") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def health_check_configuration(self) -> Optional[pulumi.Input['ServiceHealthCheckConfigurationArgs']]:\n return pulumi.get(self, \"health_check_configuration\")",
"def health_check_configuration(self) -> Optional[pulumi.Input['ServiceHealthCheckConfigurationArgs']]:\n return pulumi.get(self, \"health_check_configuration\")",
"def healthcheck(parameters): \n\n print(\"In healthcheck module\")",
"def health_check():\n app.logger.info(\"Health Check!\")\n return Response(\"All Good!\", status=200)",
"async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()",
"def health_check():\n return dict(api_status='OK')",
"def _healthcheck():\n return '', 200",
"def health_check():\n # TODO: implement any other checking logic.\n return '', 200",
"def healthcheck(self):\n url = urljoin(self.url, \"/.well-known/healthcheck.json\")\n r = requests.get(url)\n return r.json()",
"def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")",
"def health_checks(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"health_checks\")",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def health_check():\n ret = {\"Status\": 200, \"Msg\": \"Service is Up\"}\n return jsonify(ret)",
"def get_health_check(self):\n return util.create_response(output=\"OK\")",
"def health_check(name, target='TCP:22', healthy_threashold=2, unhealthy_threashold=3, interval=30, timeout=3):\n hc = HealthCheck(title=name + 'healthcheck')\n hc.HealthyThreshold = healthy_threashold\n hc.UnhealthyThreshold = unhealthy_threashold\n hc.Interval = interval\n hc.Target = target\n hc.Timeout = timeout\n return hc",
"def index():\n logging.debug('Healthy check.')\n pass # healthy check",
"def index():\n logging.debug('Healthy check.')\n pass # healthy check",
"def healthcheck():\n return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)",
"def health_checks(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"health_checks\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self) -> Optional[str]:\n return pulumi.get(self, \"health_check\")",
"def health_check(self, *, scope: Scope) -> HealthCheckStatus:",
"def health_check(request):\n return Response(\"OK\",\n status=status.HTTP_200_OK)",
"def test_health(self) -> None:\n self._response = self._app.get('/health')\n\n self.assertEqual(self._response.status, '200 OK')",
"def check(job, logger, **kwargs):\n resources = Resource.objects.filter(\n attributes__field__name=\"health_check_config\",\n lifecycle='ACTIVE'\n ).distinct()\n set_progress(\n f\"Will run health checks for {resources.count()} resource(s): \"\n f\"{[resource.name for resource in resources]}\")\n\n check_results = []\n\n for resource in resources:\n logger.info(f\"Will run health checks for resource '{resource.name}'.\")\n config_dict = get_config_value(resource)\n failing_health_checks = 0\n\n # Run all the health checks configured for this resource.\n for health_check in config_dict.get('health_checks', {}):\n max_retries = health_check.get('max_retries', 3)\n retry_interval_seconds = health_check.get('retry_interval_seconds', 1)\n\n name = health_check.get('name')\n job.set_progress(f\"Beginning health check '{name}'.\")\n url = health_check.get('url')\n accepted_statuses = health_check.get('accepted_status_codes')\n timeout_seconds = health_check.get('timeout_seconds', 3)\n\n retry_attempts = 0\n while retry_attempts <= max_retries:\n try:\n if retry_attempts > 1:\n logger.info(f\"On retry attempt {retry_attempts}.\")\n status_code = requests.get(url, timeout=timeout_seconds).status_code\n\n if accepted_statuses and status_code not in accepted_statuses:\n # Failure.\n msg = (\n f\"HTTP Request returned {status_code}, \"\n f\"which is not in the accepted statuses: {accepted_statuses}\"\n f\"for health check '{name}'.\"\n )\n logger.debug(msg)\n retry_attempts += 1\n else:\n # Pass - We got a valid status. We can stop now.\n logger.info(f\"Health check '{name}' completed with success.\")\n break\n\n except Exception as e:\n # Bad, could be ConnectionError, which will count as a failure.\n logger.debug(e)\n retry_attempts += 1\n\n # Wait for the specified retry interval before trying again\n time.sleep(retry_interval_seconds)\n\n if retry_attempts == max_retries:\n job.set_progress(f\"Max retries exceeded for health check '{name}'.\")\n failing_health_checks += 1\n\n # Summarize this resource's health check results.\n data_dict = {\n 'time': datetime.datetime.now(),\n 'resource_id': resource.id,\n 'resource_name': resource.name,\n 'failing_health_checks': failing_health_checks,\n }\n\n check_results.append(data_dict)\n\n context = {\n \"health_check_results\": check_results,\n }\n\n # Return the dict to be processed by the \"Then\" action\n return 'SUCCESS', '', '', {'context': context}",
"def test_health_check(self):\n result = self.app.get('/v1/health')\n\n # assert the status code of the response 200 (OK)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, b'UP')"
] | [
"0.67506087",
"0.67506087",
"0.6347388",
"0.6283836",
"0.61198914",
"0.6003206",
"0.5874187",
"0.5865065",
"0.5828394",
"0.58038205",
"0.58038205",
"0.58012503",
"0.57780075",
"0.5776028",
"0.57574344",
"0.5757239",
"0.5757239",
"0.5735045",
"0.57179314",
"0.57096285",
"0.57096285",
"0.57096285",
"0.57096285",
"0.57096285",
"0.57096285",
"0.5697964",
"0.5693128",
"0.56713957",
"0.563169",
"0.560203"
] | 0.6842051 | 0 |
Subdomain URL that App Runner generated for this service. You can use this URL to access your service web application. | def service_url(self) -> pulumi.Output[str]:
return pulumi.get(self, "service_url") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def base_url(self):\n return \"http://{0}:{1}/app\".format(self.host, self.port)",
"def public_rest_url(path_url: str, domain: str = CONSTANTS.DEFAULT_DOMAIN) -> str:\n return CONSTANTS.FTX_BASE_URL + path_url",
"def get_service_url():\n return get_config_handler().get_service_url()",
"def app_url(self):\n return self.request.host_url",
"def subdomain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subdomain\")",
"def service_url(self):\n return \"http://127.0.0.1:%d/wd/hub\"%self.port",
"def service_endpoint(self) -> str:\n return pulumi.get(self, \"service_endpoint\")",
"def application_host_url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_host_url\")",
"def api_url(self):\n return self.get_api_url()",
"def web_url(self) -> str:\n return pulumi.get(self, \"web_url\")",
"def api_url(self) -> httpx.URL:\n return self._client.base_url",
"def _base_url(self):\n # URL Protocol\n proto = 'https' if self._ssl else 'http'\n\n # Device port number\n if self._port is None:\n port = 8080 if self._ssl else 8008\n else:\n port = self._port\n \n return f'{proto}://{self._address}:{port}/api/v1'",
"def url(self) -> str:\n return (\n f\"{self.service_subscription.url}/service-instances/service-instance/{self.instance_id}\"\n )",
"def url(self):\n return urls.Url(\n path=self.serving_path,\n host=self.pod.env.host,\n port=self.pod.env.port,\n scheme=self.pod.env.scheme)",
"def service_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_url\")",
"def service_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_url\")",
"def url(self):\n if not self._is_served:\n raise RuntimeError('Cannot determine app url if app is not yet \"served\".')\n elif not (_current_server and _current_server.serving):\n raise RuntimeError('Cannot determine app url if the server is not '\n 'yet running.')\n else:\n host, port = _current_server.serving\n return 'http://%s:%i/%s/' % (host, port, self._path)",
"def subdomain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subdomain\")",
"def api_url(self, endpoint):\n\n return '{}/{}'.format(self.api_root, endpoint)",
"def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()",
"def api_endpoint(self) -> str:\n return pulumi.get(self, \"api_endpoint\")",
"def basic_url(self):\n return self.base_name + '.cloudlabs.rc.ucl.ac.uk'",
"def api_url(self):\n return self._api_url",
"def url(self):\n return self._client.url",
"def url(self) -> str:\n return pulumi.get(self, \"url\")",
"def url(self) -> str:\n return pulumi.get(self, \"url\")",
"def url_base():\n return \"https://dev-yourOrg.us.auth0.com\"",
"def apiurl(self):\n return self._apiurl",
"def getSubdomain(self):\n\t\treturn self.Subdomain",
"def customsubdomain(self) -> Optional[str]:\n return pulumi.get(self, \"customsubdomain\")"
] | [
"0.6826508",
"0.6681023",
"0.6672236",
"0.6605209",
"0.6551995",
"0.65030795",
"0.6468015",
"0.64567655",
"0.6423532",
"0.63967156",
"0.6384149",
"0.63212645",
"0.6273218",
"0.6251125",
"0.6244899",
"0.6244899",
"0.6230171",
"0.62126696",
"0.6212174",
"0.62013507",
"0.61964345",
"0.6186021",
"0.61741275",
"0.6161792",
"0.616153",
"0.616153",
"0.6159532",
"0.6144639",
"0.6126382",
"0.60999143"
] | 0.68187 | 1 |
Given a model name, returns a pytorch transformers model in eval mode. | def load_torchtransformers(model_name):
    # The huggingface package was renamed from pytorch_transformers to transformers; support both names.
try:
import pytorch_transformers
except ModuleNotFoundError:
import transformers as pytorch_transformers
if model_name == "bert":
tokenizer = pytorch_transformers.BertTokenizer.from_pretrained('bert-base-uncased')
model = pytorch_transformers.BertModel.from_pretrained('bert-base-uncased', torchscript=True)
input_data = torch.tensor([tokenizer.encode(text="Here is some text to encode", add_special_tokens=True)])
elif model_name == "transformer_xl":
tokenizer = pytorch_transformers.TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = pytorch_transformers.TransfoXLModel.from_pretrained('transfo-xl-wt103', torchscript=True)
input_data = torch.tensor([tokenizer.encode(text="Here is some text to encode", add_special_tokens=True)])
else:
raise ValueError(f'{model_name} is not supported. Unknown model name.')
model = model.eval()
return model, [input_data] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_model(name, disable_logging=False):\n return PluginLoader._import(\"train.model\", name, disable_logging)",
"def load_simple_transformer(model_name):\n model = torch.nn.Transformer(nhead=2, num_encoder_layers=1, num_decoder_layers=1)\n model = model.eval()\n src = torch.rand((10, 32, 512))\n tgt = torch.rand((20, 32, 512))\n return model, [src, tgt]",
"def load_model(model_name):\n if hasattr(torchvision.models, model_name):\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n try:\n import pretrainedmodels\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")",
"def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function",
"def get_model(model_name: str = \"\", cfg={}) -> torch.nn.Module:\n if model_name == \"default\":\n model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)\n\n elif model_name == \"resnetish34\":\n model = resnetish34()\n\n elif model_name == \"clstm\":\n model = CLSTM()\n\n elif model_name == \"cvt\":\n s1_depth, s2_depth, s3_depth = cfg.depths\n s1_emb_dim, s2_emb_dim, s3_emb_dim = cfg.embed_dims\n s1_mlp_mult, s2_mlp_mult, s3_mlp_mult = cfg.mlp_mults\n\n model = CvT(\n s1_emb_dim=s1_emb_dim,\n s1_depth=s1_depth,\n s1_mlp_mult=s1_mlp_mult,\n s2_emb_dim=s2_emb_dim,\n s2_depth=s2_depth,\n s2_mlp_mult=s2_mlp_mult,\n s3_emb_dim=s3_emb_dim,\n s3_depth=s3_depth,\n s3_mlp_mult=s3_mlp_mult,\n pool=cfg.cvt_pool,\n )\n else:\n raise ValueError(\"Model not found.\")\n return model",
"def load_torchvision(model_name):\n # Lazy import as torchvision may not be required.\n import torchvision\n\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n\n if model_name.startswith(\"googlenet\"):\n model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)\n else:\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]",
"def load(model_path: str):\n model = torch.load(model_path)\n model.eval()\n return model",
"def get_transformer(model_name):\n model_class, tokenizer_class, pretrained_weights = TRANSFORMER_MODELS[model_name]\n model = model_class.from_pretrained(pretrained_weights,\n output_hidden_states=True)\n tokenizer = tokenizer_class.from_pretrained(pretrained_weights)\n\n return model, tokenizer, TRANSFORMER_EMBEDDING_DIMS[model_name]",
"def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return recurrent_model(model(*args), **kwargs)\n\n return wrapper",
"def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model",
"def get_model(model_name, type):\n\n MODEL_MAP = {\"torchvision\": ([\"*\"], load_torchvision),\n \"torchtransformers\": ([\"bert\", \"transformer_xl\"], load_torchtransformers),\n \"github\": ([\"deepspeech\"], load_deepspeech),\n \"custom\": ([\"simple_transformer\"], load_simple_transformer),\n \"op\": ([\"matmul1\", \"matmul2\", \"convolution1\", \"convolution2\"], load_single_operators)}\n\n if type not in MODEL_MAP:\n raise ValueError(f'{type} is not supported. Unknown type name.')\n\n model_map_item = MODEL_MAP[type]\n supported_model_names = model_map_item[0]\n\n if model_name not in supported_model_names and \\\n (len(supported_model_names) and supported_model_names[0] != \"*\"):\n raise ValueError(f'{model_name} is not supported. Unknown model name.')\n\n baseline_model, baseline_input = model_map_item[1](model_name)\n\n # Extract model to PyTorch graph\n if torch.cuda.is_available():\n if isinstance(baseline_model, torch.nn.Module):\n baseline_model = baseline_model.cuda()\n baseline_input = [inp.cuda() for inp in baseline_input]\n\n trace = torch.jit.trace(baseline_model, baseline_input)\n if isinstance(baseline_model, torch.nn.Module):\n trace = trace.float().eval()\n\n if torch.cuda.is_available():\n trace = trace.cuda()\n else:\n trace = trace.cpu()\n\n input_names = [\"input{}\".format(idx) for idx, inp in enumerate(baseline_input)]\n input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))\n return trace, input_shapes",
"def get_model(name, dataset):\n field_dims = dataset.field_dims\n if name == 'lr':\n return LogisticRegressionModel(field_dims)\n elif name == 'fm':\n return FactorizationMachineModel(field_dims, embed_dim=16)\n elif name == 'hofm':\n return HighOrderFactorizationMachineModel(\n field_dims, order=3, embed_dim=16)\n elif name == 'ffm':\n return FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)\n elif name == 'fnn':\n return FactorizationSupportedNeuralNetworkModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'wd':\n return WideAndDeepModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'ipnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='inner',\n dropout=0.2)\n elif name == 'opnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='outer',\n dropout=0.2)\n elif name == 'dcn':\n return DeepCrossNetworkModel(\n field_dims,\n embed_dim=16,\n num_layers=3,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'nfm':\n return NeuralFactorizationMachineModel(\n field_dims, embed_dim=64, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'ncf':\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\n assert isinstance(dataset, MovieLens20MDataset) or isinstance(\n dataset, MovieLens1MDataset)\n return NeuralCollaborativeFiltering(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, 16),\n dropout=0.2,\n user_field_idx=dataset.user_field_idx,\n item_field_idx=dataset.item_field_idx)\n elif name == 'fnfm':\n return FieldAwareNeuralFactorizationMachineModel(\n field_dims, embed_dim=4, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'dfm':\n return DeepFactorizationMachineModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'xdfm':\n return ExtremeDeepFactorizationMachineModel(\n field_dims,\n embed_dim=16,\n cross_layer_sizes=(16, 16),\n split_half=False,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'afm':\n return AttentionalFactorizationMachineModel(\n field_dims, embed_dim=16, attn_size=16, dropouts=(0.2, 0.2))\n elif name == 'afi':\n return AutomaticFeatureInteractionModel(\n field_dims,\n embed_dim=16,\n atten_embed_dim=64,\n num_heads=2,\n num_layers=3,\n mlp_dims=(400, 400),\n dropouts=(0, 0, 0))\n elif name == 'afn':\n print('Model:AFN')\n return AdaptiveFactorizationNetwork(\n field_dims,\n embed_dim=16,\n LNN_dim=1500,\n mlp_dims=(400, 400, 400),\n dropouts=(0, 0, 0))\n else:\n raise ValueError('unknown model name: ' + name)",
"def load_model(model_name):\n model = get_model(training = False)\n checkpoint = torch.load('../models/' + model_name)\n model.load_state_dict(checkpoint['model_state_dict'])\n return model",
"def load_model(model_path: str) -> object:\n model = torch.load(model_path)\n model.eval()\n return model",
"def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")",
"def load_model(model_name):\n model_def_path = os.path.join(MODEL_DIR, model_name + \".py\")\n weights_path = os.path.join(MODEL_DIR, model_name + \".pth\")\n if six.PY3:\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(model_name,\n model_def_path)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n else:\n import importlib\n dirname = os.path.dirname(model_def_path)\n sys.path.insert(0, dirname)\n module_name = os.path.splitext(os.path.basename(model_def_path))[0]\n mod = importlib.import_module(module_name)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n net = modify_to_return_embeddings(net, model_name)\n return net",
"def get_model(name, **kwargs):\n models = {'standard_lstm_lm_200' : standard_lstm_lm_200,\n 'standard_lstm_lm_650' : standard_lstm_lm_650,\n 'standard_lstm_lm_1500': standard_lstm_lm_1500,\n 'awd_lstm_lm_1150': awd_lstm_lm_1150,\n 'awd_lstm_lm_600': awd_lstm_lm_600,\n 'big_rnn_lm_2048_512': big_rnn_lm_2048_512,\n 'elmo_2x1024_128_2048cnn_1xhighway': elmo_2x1024_128_2048cnn_1xhighway,\n 'elmo_2x2048_256_2048cnn_1xhighway': elmo_2x2048_256_2048cnn_1xhighway,\n 'elmo_2x4096_512_2048cnn_2xhighway': elmo_2x4096_512_2048cnn_2xhighway,\n 'transformer_en_de_512': transformer_en_de_512,\n 'bert_12_768_12' : bert_12_768_12,\n 'bert_24_1024_16' : bert_24_1024_16,\n 'distilbert_6_768_12' : distilbert_6_768_12,\n 'roberta_12_768_12' : roberta_12_768_12,\n 'roberta_24_1024_16' : roberta_24_1024_16,\n 'ernie_12_768_12' : ernie_12_768_12}\n name = name.lower()\n if name not in models:\n raise ValueError(\n 'Model %s is not supported. Available options are\\n\\t%s'%(\n name, '\\n\\t'.join(sorted(models.keys()))))\n return models[name](**kwargs)",
"def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model",
"def load_pretrainedmodels(model_name):\n\n # Lazy import as torchvision may not be required.\n import pretrainedmodels\n\n model = getattr(pretrainedmodels, model_name)().float().eval()\n input_shape = [1, *model.input_size]\n input_data = torch.rand(input_shape).float() * 256\n for channel in range(3):\n input_data[:, channel] -= model.mean[channel]\n input_data[:, channel] /= model.std[channel]\n return model, [input_data]",
"def load_model(model_name, MODEL_DIR):\n model_def_path = os.path.join(MODEL_DIR, model_name + '.py')\n weights_path = os.path.join(MODEL_DIR, model_name + '.pth')\n mod = load_module_2or3(model_name, model_def_path)\n func = getattr(mod, model_name)\n net = func(weights_path=weights_path)\n return net",
"def load_model(model, trained_models_dir, image_name):\n# if model == \"keras\":\n if model == 1:\n return load_keras_model(trained_models_dir, image_name)\n# elif model == \"lgb\":\n elif model == 3:\n return load_lgb_model(trained_models_dir, image_name)\n# elif model = \"sklearn\":\n else:\n return load_joblib_model(trained_models_dir, image_name)",
"def load_deepspeech(model_name):\n\n # For reference:\n # from deepspeech_pytorch.model import DeepSpeech\n # from torch.utils.model_zoo import load_url\n # import torch.onnx\n\n # pretrained_url = 'https://github.com/SeanNaren/deepspeech.pytorch/releases/download/v2.0/an4_pretrained_v2.pth'\n # params = load_url(pretrained_url)\n # model = DeepSpeech.load_model_package(params)\n # model.eval()\n # input_sizes = (1, 1, 161, 753)\n # input_data = torch.randn(*input_sizes).float()\n # input_sizes = torch.IntTensor([161]).int()\n # model(input_data, input_sizes)\n # return model, [input_data, input_sizes]\n\n raise NotImplementedError(\"TVM pytorch frontend doesn't support all the required \"\n \"operators for this model.\")",
"def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models.alexnet(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading pretrained ResNet18 Model\")\n model_ft = models.resnet18(pretrained=True)\n\n for param in model_ft.parameters(): # Code for fixing the Conv Layer\n param.requires_grad = False # During Training Conv layer does not learn.\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet50\":\n print(\"Loading pretrained ResNet50 Model\")\n\n model_ft = models.resnet50(pretrained=True)\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"DenseNet\":\n print(\"Loading pretrained DenseNet161 Model\")\n model_ft = models.densenet161(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, 100)\n\n if cfg.load_model_true:\n model_ft.load_state_dict(torch.load(cfg.load_model_path))\n\n return model_ft",
"def get_model(name, dataset):\r\n field_dims = dataset.field_dims\r\n\r\n if name == 'ncf':\r\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\r\n assert isinstance(dataset, MovieLens1MDataset)\r\n return NeuralCollaborativeFiltering(field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2,\r\n user_field_idx=dataset.user_field_idx,\r\n item_field_idx=dataset.item_field_idx)\r\n else:\r\n raise ValueError('unknown model name: ' + name)",
"def get_model(model_name: str, map_location=torch.device('cpu')):\n # model urls on Zenodo\n model_urls = {'ParallelNets': 'https://zenodo.org/record/7245516/files/ParallelNets.pth?download=1',\n 'UNetPath': 'https://zenodo.org/record/7245516/files/UNetPath.pth?download=1'}\n\n # check if model_name is supported\n if model_name not in ['ParallelNets', 'UNetPath']:\n raise ValueError(\"Model name needs to be 'ParallelNets' or 'UNetPath'.\")\n\n model_path = pkg_resources.resource_filename('crackpy', f'crack_detection/models/{model_name}.pth')\n\n # check if model folder exists\n origin, _ = os.path.split(model_path)\n if not os.path.exists(origin):\n os.makedirs(origin)\n\n if not os.path.exists(model_path):\n print(f\"Downloading {model_name}...\")\n torch.hub.download_url_to_file(model_urls[model_name], model_path)\n\n if model_name == 'ParallelNets':\n model = ParallelNets(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n else: # model_name == 'UNetPath'\n model = UNet(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n\n return model",
"def get_model(name):\n\n try:\n from .model_defs import get_model_from_def\n model = get_model_from_def(name)\n logger.info(\"Model {n} loaded from model_defs module\".format(n=name))\n except NameError:\n try:\n model = get_model_from_yaml(name)\n logger.info(\"Model {n} loaded from yaml\".format(n=name))\n except KeyError:\n try:\n from .model_defs import parse_model_name\n model = parse_model_name(name)\n logger.info(\"Model {n} parsed from name\".format(n=name))\n except NameError:\n sys.exit(\"Unknown model {n}\".format(n=name))\n\n if not hasattr(model, 'name'):\n model.name = name\n\n return model",
"def loadModel(name, path=None):\n\n # if a path is given, try to load from that path first\n if path:\n try:\n model = TFT5ForConditionalGeneration.from_pretrained(path)\n tokenizer = T5Tokenizer.from_pretrained(path)\n \n return model, tokenizer\n except:\n print(f\"WARNING: Could not load the model from the path ({path}) specified with --from-pretrained flag. Trying to load '{name}' from cloud instead.\")\n\n # if no path was specified, or the load from path failed, try to load from cloud using the given model name\n model = TFT5ForConditionalGeneration.from_pretrained(name)\n tokenizer = T5Tokenizer.from_pretrained(name)\n \n return model, tokenizer",
"def get_model(model_name):\n module_name = 'strain.models.strain_' + model_name.lower()\n model_module = importlib.import_module(module_name)\n obj = getattr(model_module, model_name)\n return obj",
"def get_model(model_name):\n model = CNN().get_model(model_name=model_name)\n\n return model",
"def load_model(PATH):\n model = torch.load(PATH)\n model.eval()\n return model"
] | [
"0.697611",
"0.6974655",
"0.6780623",
"0.6676455",
"0.65731716",
"0.6506318",
"0.6412367",
"0.6401762",
"0.6367586",
"0.6244505",
"0.6217965",
"0.619761",
"0.6148934",
"0.61453503",
"0.61379516",
"0.6132714",
"0.6060599",
"0.6044852",
"0.59870446",
"0.59724617",
"0.5971283",
"0.5970529",
"0.5966898",
"0.5892766",
"0.5888794",
"0.588049",
"0.58671856",
"0.5862055",
"0.5849484",
"0.5843922"
] | 0.73867327 | 0 |
Load DeepSpeech LSTM model from GitHub repo. Unfortunately TVM does not currently support LSTM operators in the PyTorch frontend. This is also the case for most other frontends. | def load_deepspeech(model_name):
# For reference:
# from deepspeech_pytorch.model import DeepSpeech
# from torch.utils.model_zoo import load_url
# import torch.onnx
# pretrained_url = 'https://github.com/SeanNaren/deepspeech.pytorch/releases/download/v2.0/an4_pretrained_v2.pth'
# params = load_url(pretrained_url)
# model = DeepSpeech.load_model_package(params)
# model.eval()
# input_sizes = (1, 1, 161, 753)
# input_data = torch.randn(*input_sizes).float()
# input_sizes = torch.IntTensor([161]).int()
# model(input_data, input_sizes)
# return model, [input_data, input_sizes]
raise NotImplementedError("TVM pytorch frontend doesn't support all the required "
"operators for this model.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def model_fn(model_dir):\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cpu\" if torch.cuda.is_available() else \"cpu\")\n #model = LSTM(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n model = LSTM(model_info['num_classes'], model_info['input_size'], model_info['hidden_size'], model_info['num_layers'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model",
"def load_single_lstm_model(device, path):\n saved_model_data = torch.load(path, map_location=device)\n train_args = saved_model_data['args']\n model = build_eval_model_from_args(train_args, saved_model_data, device)\n return [model, train_args]",
"def model_fn(model_dir):\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model",
"def pretrained(name=\"sentimentdl_use_imdb\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(SentimentDLModel, name, lang, remote_loc)",
"def model_fn(model_dir):\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved transformers.\n transformer_path = os.path.join(model_dir, 'transformers.pkl')\n with open(transformer_path, 'rb') as f:\n model.transformer = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model",
"def test_word_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": {\"emb_dim\": 30},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"add_terminals\": \"True\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)",
"def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)",
"def load_models(lstm_model_path, model_history_path, config_file_path):\n\tlstm_model = load_model(lstm_model_path, custom_objects={'rmse': rmse, 'Lookahead': Lookahead, 'RAdam': RAdam})\n\tlstm_model.summary(line_length=180)\n\t# Printing the input and output model's layer names. They will be needed in the Classifier to load the LSTM models correctly.\n\tprint(\"-----_model_inputs------\")\n\tprint(lstm_model.inputs)\n\tprint(\"----- model_outputs------\")\n\tprint(lstm_model.outputs)\n\tprint(\"---------------------\")\n\twith open(model_history_path) as json_file:\n\t\ttrain_history = json.load(json_file)\n\twith open(config_file_path) as config_file:\n\t\tparameters = json.load(config_file)\n\treturn lstm_model, train_history, parameters",
"def model_fn(model_dir):\n print(\"> Loading model...\")\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n # Read model init arguments from model_info\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n print(\"> model_info: {}\".format(model_info))\n\n # Rebuild model from info\n model = LSTMRegressor(input_size=model_info['input_size'],\n input_channels=model_info['input_channels'],\n c_filters=model_info['c_filters'],\n c_kernel_size=model_info['c_kernel_size'],\n lstm_layers=model_info['lstm_layers'],\n lstm_hidden=model_info['lstm_hidden'],\n dropout=model_info['dropout'],\n output_size=model_info['output_size'])\n\n # Restore model\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n model.double().to(device).eval()\n\n print(\"> Model loading: Finished\")\n return model",
"def load_model(model_path):\n nlp = spacy.blank('en') \n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n #load pretrained model from the path\n ner = nlp.from_disk(model_path)\n return ner",
"def pretrained(name=\"lemma_antbnc\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(LemmatizerModel, name, lang, remote_loc)",
"def pretrained(name=\"sentence_detector_dl\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(SentenceDetectorDLModel, name, lang, remote_loc)",
"def load_pretrained_rnn(model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n rnn = RNN_model(model_fn = RNN_model.set_model_from_file,\n model_dir = model_dir,\n **rnn_params)\n\n # Compile model\n rnn.model_fn()\n\n return rnn",
"def load_model():\n global model_tok, model_mlm, model, model_cls\n if model is None:\n model_name_or_path = os.getenv('TRANSFORMER_MODEL', default='distilbert-base-multilingual-cased')\n # 'bert-base-multilingual-cased'\n model_tok = AutoTokenizer.from_pretrained(model_name_or_path)\n model_mlm = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n model_mlm.eval()\n model = model_mlm.base_model\n\n if isinstance(model_mlm, BertPreTrainedModel):\n model_cls = model_mlm.cls\n elif isinstance(model_mlm, DistilBertPreTrainedModel):\n model_cls = nn.Sequential(\n model_mlm.vocab_transform,\n nn.GELU(),\n model_mlm.vocab_layer_norm,\n model_mlm.vocab_projector\n )\n else:\n raise ValueError(f'{model_name_or_path} is not supported yet. try one of '\n f'{\", \".join(list(AvailableModels.__members__.keys()))}')\n model.to(device)\n model_mlm.to(device)\n # model_tok.to(device)\n model_cls.to(device)",
"def load_trained_net(mal):\n model_root = os.path.join(os.getcwd(), 'data', 'models')\n model = load_model(os.path.join(model_root, 'model_' + mal + '.h5'))\n\n return model",
"def load(self, model_name_or_path):\n return BertMLM(model_name_or_path, self.top_k)",
"def import_model(path=None):\n path = get_model_path() if path is None else path\n return torch.jit.load(path)",
"def load_model(\n model_path=filepath + \"/trained_models/hi2en/\", model_file_name=\"model.h5\"\n):\n model_path = (\n filepath + \"/trained_models/{}/\".format(model_path)\n if model_path in [\"en2hi\", \"hi2en\"]\n else model_path\n )\n config = SConfig(configuration_file=model_path + \"config.pkl\")\n s2s = Seq2Seq(config)\n s2s.load_model(path_to_model=model_path, model_file_name=model_file_name)\n return s2s",
"def load_tf_s2s(filepath: Union[str, os.PathLike],\n state_dict: Dict) -> tf.keras.Model:\n model_dir = Path(filepath).joinpath('model')\n if not [f.name for f in model_dir.glob('[!.]*.h5')]:\n logger.warning('No seq2seq or threshold estimation net found in {}.'.format(model_dir))\n return None\n # load threshold estimator net, initialize encoder and decoder and load seq2seq weights\n threshold_net = tf.keras.models.load_model(model_dir.joinpath('threshold_net.h5'), compile=False)\n latent_dim = state_dict['latent_dim']\n n_features = state_dict['shape'][-1]\n encoder_net = EncoderLSTM(latent_dim)\n decoder_net = DecoderLSTM(latent_dim, n_features, state_dict['output_activation'])\n seq2seq = Seq2Seq(encoder_net, decoder_net, threshold_net, n_features, beta=state_dict['beta'])\n seq2seq.load_weights(model_dir.joinpath('seq2seq.ckpt'))\n return seq2seq",
"def test_glove_lstm(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"lstm-pytorch\"},\n \"params\": {\"embedder_type\": \"glove\"},\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {**config, \"params\": {**config[\"params\"], \"use_crf_layer\": False}}\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)",
"def pretrained(name=\"ld_wiki_tatoeba_cnn_21\", lang=\"xx\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(LanguageDetectorDL, name, lang, remote_loc)",
"def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))",
"def load_lyapunov_nn(lyapunov_nn, full_path):\n loaded_state_dict = torch.load(full_path)\n lyapunov_nn.lyapunov_function.net.load_state_dict(loaded_state_dict)\n return lyapunov_nn",
"def pretrained(name=\"longformer_base_sequence_classifier_imdb\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(LongformerForSequenceClassification, name, lang, remote_loc)",
"def load_trained_model(unit):\n return load_model(DATA_FOLDER + \"{}_cdae_model.hd5\".format(UNITS[unit]))",
"def pretrained(name=\"t5_small\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(T5Transformer, name, lang, remote_loc)",
"def pretrained(name=\"tfhub_use\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(UniversalSentenceEncoder, name, lang, remote_loc)",
"def load_model(self):\n try:\n self.model = Word2Vec.load(self.config[\"model_path\"])\n self.model.init_sims(replace=True)\n except Exception as e:\n print(e)\n print(\"error in model loading!\")",
"def lstm_model(nlstm=128, layer_norm=False):\n\n def network_fn(X, nenv=1, obs_size=-1):\n with tf.variable_scope(\"emb\", reuse=tf.AUTO_REUSE):\n w_emb = tf.get_variable(\"w_emb\", [obs_size+1, 32])\n X = tf.nn.embedding_lookup(w_emb, X)\n\n nbatch = X.shape[0]\n nsteps = nbatch // nenv\n\n h = tf.layers.flatten(X)\n\n M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)\n S = tf.placeholder(tf.float32, [nenv, 2*nlstm]) #states\n\n xs = batch_to_seq(h, nenv, nsteps)\n ms = batch_to_seq(M, nenv, nsteps)\n\n assert not layer_norm\n h5, snew = lstm(xs, ms, S, scope='lstm', nh=nlstm)\n\n h = seq_to_batch(h5)\n initial_state = np.zeros(S.shape.as_list(), dtype=float)\n\n return h, {'S':S, 'M':M, 'state':snew, 'initial_state':initial_state}\n\n return network_fn",
"def load(load_path: str):\n if not (load_path and os.path.isfile(load_path)):\n raise ValueError(f\"Invalid snapshot path{load_path}\")\n print(f\"Loading model from {load_path}...\")\n state = torch.load(load_path, map_location=lambda storage, loc: storage)\n config = pytext_config_from_json(state[CONFIG_JSON])\n\n task = create_task(\n config.task, metadata=state[DATA_STATE], model_state=state[MODEL_STATE]\n )\n return task, config"
] | [
"0.6424617",
"0.63462615",
"0.62104595",
"0.615085",
"0.61280006",
"0.61014485",
"0.5989856",
"0.59578437",
"0.5916136",
"0.5891427",
"0.5865856",
"0.58404297",
"0.5829503",
"0.57395333",
"0.5738998",
"0.57333404",
"0.5731952",
"0.5727657",
"0.5726606",
"0.5726429",
"0.57072586",
"0.5703024",
"0.5694408",
"0.5684347",
"0.56815386",
"0.5644407",
"0.56410277",
"0.5634913",
"0.5610081",
"0.5608343"
] | 0.6610076 | 0 |
A simple transformer from pytorch. | def load_simple_transformer(model_name):
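    # Minimal nn.Transformer (default d_model=512) plus random source/target tensors sized for tracing.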
model = torch.nn.Transformer(nhead=2, num_encoder_layers=1, num_decoder_layers=1)
model = model.eval()
src = torch.rand((10, 32, 512))
tgt = torch.rand((20, 32, 512))
return model, [src, tgt] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,\n input_dim,\n dec_seq_len,\n out_seq_len,\n d_model=512,\n nhead=8,\n num_encoder_layers=6,\n num_decoder_layers=6,\n dim_feedforward=2048,\n dropout=0.1,\n activation='relu',\n custom_encoder=None,\n custom_decoder=None):\n super(TransformerTS, self).__init__()\n self.transform = nn.Transformer(\n d_model=d_model,\n nhead=nhead,\n num_encoder_layers=num_encoder_layers,\n num_decoder_layers=num_decoder_layers,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n custom_encoder=custom_encoder,\n custom_decoder=custom_decoder\n )\n self.pos = PositionalEncoding(d_model)\n self.enc_input_fc = nn.Linear(input_dim, d_model)\n self.dec_input_fc = nn.Linear(input_dim, d_model)\n self.out_fc = nn.Linear(dec_seq_len * d_model, out_seq_len)\n self.dec_seq_len = dec_seq_len",
"def __init__(self, args):\n super(Transformer, self).__init__()\n\n self.name = 'Transformer'\n if len(args.max_relative_pos) != args.nlayers:\n assert len(args.max_relative_pos) == 1\n args.max_relative_pos = args.max_relative_pos * args.nlayers\n\n self.embedder = Embedder(args)\n self.encoder = Encoder(args, self.embedder.enc_input_size)\n self.decoder_loc = nn.Linear(self.embedder.enc_input_size, 1)\n self.decoder_fix = nn.Linear(self.embedder.enc_input_size, 1)\n self.nobug_embedding = nn.Parameter(torch.zeros(1))\n self.criterion = nn.CrossEntropyLoss(reduction='none')",
"def forward_tensor(self, x):\n pass",
"def test_export_pytorch_model(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)",
"def __init__(self, transformer):\n self.transformer = transformer",
"def get_trans_net():\n return nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 2))",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def transform(self, X):\n ...",
"def PyTorchWrapper(\n pytorch_model: Any,\n convert_inputs: Optional[Callable] = None,\n convert_outputs: Optional[Callable] = None,\n) -> Model[Any, Any]:\n if convert_inputs is None:\n convert_inputs = convert_pytorch_default_inputs\n if convert_outputs is None:\n convert_outputs = convert_pytorch_default_outputs\n return Model(\n \"pytorch\",\n forward,\n attrs={\"convert_inputs\": convert_inputs, \"convert_outputs\": convert_outputs},\n shims=[PyTorchShim(pytorch_model)],\n dims={\"nI\": None, \"nO\": None},\n )",
"def transform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover",
"def _transform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover",
"def compute_train_transform(seed=123456):\n random.seed(seed)\n torch.random.manual_seed(seed)\n \n # Transformation that applies color jitter with brightness=0.4, contrast=0.4, saturation=0.4, and hue=0.1\n color_jitter = transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) \n \n train_transform = transforms.Compose([\n ##############################################################################\n # TODO: Start of your code. #\n # #\n # Hint: Check out transformation functions defined in torchvision.transforms #\n # The first operation is filled out for you as an example.\n ##############################################################################\n # Step 1: Randomly resize and crop to 32x32.\n transforms.RandomResizedCrop(32),\n # Step 2: Horizontally flip the image with probability 0.5\n transforms.RandomHorizontalFlip(p=0.5),\n # Step 3: With a probability of 0.8, apply color jitter (you can use \"color_jitter\" defined above.\n transforms.RandomApply([color_jitter], p=0.8),\n # Step 4: With a probability of 0.2, convert the image to grayscale\n # transforms.RandomApply([transforms.Grayscale()], p=0.2),\n transforms.RandomGrayscale(p=0.2),\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n transforms.ToTensor(),\n transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])])\n return train_transform",
"def __init__(self):\n # Initializing the Model with the class\n super(Model, self).__init__()\n # torch.nn.Linear applies a Linear transformation. The first parameter is the size of each input sample. The second is the size of the output sample\n self.linear = torch.nn.Linear(1, 1)",
"def convert_to_torch_script(model, input_size):\n model.eval()\n\n # An example input you would normally provide to your model's forward() method.\n example = torch.rand(1, 3, input_size, input_size)\n\n # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.\n traced_script_module = torch.jit.trace(model, example)\n\n return traced_script_module",
"def to_torch(batch, **kwargs):\n x = torch.from_numpy(np.array(batch, dtype='float32'))\n return x.view(*x.size()[:2], -1).permute(2, 0, 1)",
"def transform(self, x):",
"def __to_torch(self):\n self.adj = Variable(torch.LongTensor(self.adj))\n \n if self.cuda:\n self.adj = self.adj.cuda()",
"def torch(self):\n tensor = self.data * 2**self.scale\n \n # Check for and warn about errors in conversion\n if bad_conversion(self, tensor):\n warnings.warn(\"Underflow and/or overflow detected \"\n \"during torch() call\", RuntimeWarning)\n\n return tensor",
"def build_transform(self):\n if self.training:\n all_trans = [trans.BEVRandomHorizontalFlip(), trans.BEVToTensor()]\n else:\n all_trans = [trans.BEVToTensor()]\n\n self.transform = trans.Compose(all_trans)\n return self.transform",
"def transform():",
"def forward(self, input_tensor):\n rv = torch.randn(input_tensor.size(), device=self.device) * 0.02\n intermediate = input_tensor + rv\n for module in self.down:\n intermediate = module(intermediate)\n rv = torch.randn(intermediate.size(), device=self.device) * 0.02 + 1\n intermediate *= rv\n\n intermediate = intermediate.view(-1, self.width)\n\n for module in self.classifier:\n intermediate = module(intermediate)\n\n return intermediate",
"def transform():\n pass",
"def load_torchtransformers(model_name):\n\n # There are two versions of huggingface, support both\n try:\n import pytorch_transformers\n except ModuleNotFoundError:\n import transformers as pytorch_transformers\n\n if model_name == \"bert\":\n tokenizer = pytorch_transformers.BertTokenizer.from_pretrained('bert-base-uncased')\n model = pytorch_transformers.BertModel.from_pretrained('bert-base-uncased', torchscript=True)\n input_data = torch.tensor([tokenizer.encode(text=\"Here is some text to encode\", add_special_tokens=True)])\n elif model_name == \"transformer_xl\":\n tokenizer = pytorch_transformers.TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')\n model = pytorch_transformers.TransfoXLModel.from_pretrained('transfo-xl-wt103', torchscript=True)\n input_data = torch.tensor([tokenizer.encode(text=\"Here is some text to encode\", add_special_tokens=True)])\n else: \n raise ValueError(f'{model_name} is not supported. Unknown model name.')\n\n model = model.eval()\n return model, [input_data]",
"def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)",
"def __init__(self):\n Transform.__init__(self)"
] | [
"0.6855274",
"0.658197",
"0.6179455",
"0.6175562",
"0.6171663",
"0.61527574",
"0.6151067",
"0.6151067",
"0.6151067",
"0.6151067",
"0.6151067",
"0.6151067",
"0.6151067",
"0.61124516",
"0.61082864",
"0.61082345",
"0.60755324",
"0.6045084",
"0.60307",
"0.6029154",
"0.60272676",
"0.6012366",
"0.60113436",
"0.6010703",
"0.59969366",
"0.5963893",
"0.5963125",
"0.5939746",
"0.58670604",
"0.5864998"
] | 0.7178562 | 0 |
Get a PyTorch model by type and name. Returns the PyTorch trace and a list of (input name, shape) pairs. | def get_model(model_name, type):
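    # Map each supported source type to (allowed model names, loader function); ["*"] accepts any name.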
MODEL_MAP = {"torchvision": (["*"], load_torchvision),
"torchtransformers": (["bert", "transformer_xl"], load_torchtransformers),
"github": (["deepspeech"], load_deepspeech),
"custom": (["simple_transformer"], load_simple_transformer),
"op": (["matmul1", "matmul2", "convolution1", "convolution2"], load_single_operators)}
if type not in MODEL_MAP:
raise ValueError(f'{type} is not supported. Unknown type name.')
model_map_item = MODEL_MAP[type]
supported_model_names = model_map_item[0]
if model_name not in supported_model_names and \
(len(supported_model_names) and supported_model_names[0] != "*"):
raise ValueError(f'{model_name} is not supported. Unknown model name.')
baseline_model, baseline_input = model_map_item[1](model_name)
# Extract model to PyTorch graph
if torch.cuda.is_available():
if isinstance(baseline_model, torch.nn.Module):
baseline_model = baseline_model.cuda()
baseline_input = [inp.cuda() for inp in baseline_input]
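    # Trace the model with the example inputs to obtain a TorchScript graph.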
trace = torch.jit.trace(baseline_model, baseline_input)
if isinstance(baseline_model, torch.nn.Module):
trace = trace.float().eval()
if torch.cuda.is_available():
trace = trace.cuda()
else:
trace = trace.cpu()
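    # Pair each example input with a generated name and its shape for the caller.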
input_names = ["input{}".format(idx) for idx, inp in enumerate(baseline_input)]
input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
return trace, input_shapes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_model(name, disable_logging=False):\n return PluginLoader._import(\"train.model\", name, disable_logging)",
"def get_model(\n model_type: str,\n model_name: str = None,\n num_classes: t.Optional[int] = 1000,\n input_shape: t.Optional[t.Tuple] = (3, 224, 224),\n model: t.Optional[Module] = None,\n model_path: t.Optional[str] = None,\n classifier_params: t.Optional[t.Dict] = None,\n show: bool = False,\n) -> torch.nn.Module:\n if model_type == 'classifier':\n if isinstance(model_path, str) and model_path.lower() == 'imagenet':\n pretrained = True\n else:\n pretrained = False\n m_facade = models_facade.ModelFacade(task='classification')\n parameters = dict(requires_grad=True, pretrained=pretrained)\n model = m_facade.get_model_class(model_definition=model_name)(**parameters)\n\n # Patch last linear layer if needed\n if num_classes is not None and num_classes != 1000:\n _patch_last_linear(model=model, num_classes=num_classes)\n\n elif model_type == 'opti-classifier':\n m_facade = models_facade.ModelFacade(task='opti-classification')\n if model_path.lower() == 'imagenet':\n pretrained = model_path.lower()\n else:\n pretrained = None\n\n if classifier_params is not None:\n model_params = classifier_params\n else:\n model_params = dict(\n backbone=model_name,\n depth=5,\n num_classes=num_classes,\n num_input_channels=input_shape[0],\n num_last_filters=128,\n dropout=0.2,\n pretrained=pretrained,\n unfreeze_encoder=True,\n custom_enc_start=False,\n use_complex_final=False,\n conv_type='default',\n bn_type='default',\n activation_type='relu',\n depthwise=False,\n )\n logging.info(f\"\\tArgument classifier_params is empty, use default:\\n\\t{model_params}\")\n model = m_facade.get_model_class(model_definition='basic_classifier')(**model_params)\n elif model_type == 'custom':\n if model is None:\n raise NotImplementedError('Parameter model_mode is set to \"custom\", but model not specified.')\n # TODO: Add segmentation, detection, OCR tasks\n else:\n raise NotImplementedError(\n f\"Model type {model_type} not implemented.\" f\"Use one of ['classifier', 'opti-classifier', 'custom']\"\n )\n\n if isinstance(model_path, str) and model_path != 'ImageNet':\n if os.path.exists(model_path):\n try:\n model.load_state_dict(torch.load(model_path))\n except RuntimeError:\n model.load_state_dict(torch.load(model_path)['model_state_dict'])\n except Exception:\n raise RuntimeError(\n 'Please provide model weights either as the whole file, '\n 'or as a \\'model_state_dict\\' part of the file'\n )\n else:\n raise FileNotFoundError(f\"No such file or directory: {model_path}\")\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n if show:\n summary(model, input_size=input_shape)\n return model",
"def get_model(model_name: str = \"\", cfg={}) -> torch.nn.Module:\n if model_name == \"default\":\n model = AudioNTT2020(n_mels=cfg.n_mels, d=cfg.feature_d)\n\n elif model_name == \"resnetish34\":\n model = resnetish34()\n\n elif model_name == \"clstm\":\n model = CLSTM()\n\n elif model_name == \"cvt\":\n s1_depth, s2_depth, s3_depth = cfg.depths\n s1_emb_dim, s2_emb_dim, s3_emb_dim = cfg.embed_dims\n s1_mlp_mult, s2_mlp_mult, s3_mlp_mult = cfg.mlp_mults\n\n model = CvT(\n s1_emb_dim=s1_emb_dim,\n s1_depth=s1_depth,\n s1_mlp_mult=s1_mlp_mult,\n s2_emb_dim=s2_emb_dim,\n s2_depth=s2_depth,\n s2_mlp_mult=s2_mlp_mult,\n s3_emb_dim=s3_emb_dim,\n s3_depth=s3_depth,\n s3_mlp_mult=s3_mlp_mult,\n pool=cfg.cvt_pool,\n )\n else:\n raise ValueError(\"Model not found.\")\n return model",
"def get_model(name):\n\n try:\n from .model_defs import get_model_from_def\n model = get_model_from_def(name)\n logger.info(\"Model {n} loaded from model_defs module\".format(n=name))\n except NameError:\n try:\n model = get_model_from_yaml(name)\n logger.info(\"Model {n} loaded from yaml\".format(n=name))\n except KeyError:\n try:\n from .model_defs import parse_model_name\n model = parse_model_name(name)\n logger.info(\"Model {n} parsed from name\".format(n=name))\n except NameError:\n sys.exit(\"Unknown model {n}\".format(n=name))\n\n if not hasattr(model, 'name'):\n model.name = name\n\n return model",
"def get_model(name, dataset):\n field_dims = dataset.field_dims\n if name == 'lr':\n return LogisticRegressionModel(field_dims)\n elif name == 'fm':\n return FactorizationMachineModel(field_dims, embed_dim=16)\n elif name == 'hofm':\n return HighOrderFactorizationMachineModel(\n field_dims, order=3, embed_dim=16)\n elif name == 'ffm':\n return FieldAwareFactorizationMachineModel(field_dims, embed_dim=4)\n elif name == 'fnn':\n return FactorizationSupportedNeuralNetworkModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'wd':\n return WideAndDeepModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'ipnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='inner',\n dropout=0.2)\n elif name == 'opnn':\n return ProductNeuralNetworkModel(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, ),\n method='outer',\n dropout=0.2)\n elif name == 'dcn':\n return DeepCrossNetworkModel(\n field_dims,\n embed_dim=16,\n num_layers=3,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'nfm':\n return NeuralFactorizationMachineModel(\n field_dims, embed_dim=64, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'ncf':\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\n assert isinstance(dataset, MovieLens20MDataset) or isinstance(\n dataset, MovieLens1MDataset)\n return NeuralCollaborativeFiltering(\n field_dims,\n embed_dim=16,\n mlp_dims=(16, 16),\n dropout=0.2,\n user_field_idx=dataset.user_field_idx,\n item_field_idx=dataset.item_field_idx)\n elif name == 'fnfm':\n return FieldAwareNeuralFactorizationMachineModel(\n field_dims, embed_dim=4, mlp_dims=(64, ), dropouts=(0.2, 0.2))\n elif name == 'dfm':\n return DeepFactorizationMachineModel(\n field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2)\n elif name == 'xdfm':\n return ExtremeDeepFactorizationMachineModel(\n field_dims,\n embed_dim=16,\n cross_layer_sizes=(16, 16),\n split_half=False,\n mlp_dims=(16, 16),\n dropout=0.2)\n elif name == 'afm':\n return AttentionalFactorizationMachineModel(\n field_dims, embed_dim=16, attn_size=16, dropouts=(0.2, 0.2))\n elif name == 'afi':\n return AutomaticFeatureInteractionModel(\n field_dims,\n embed_dim=16,\n atten_embed_dim=64,\n num_heads=2,\n num_layers=3,\n mlp_dims=(400, 400),\n dropouts=(0, 0, 0))\n elif name == 'afn':\n print('Model:AFN')\n return AdaptiveFactorizationNetwork(\n field_dims,\n embed_dim=16,\n LNN_dim=1500,\n mlp_dims=(400, 400, 400),\n dropouts=(0, 0, 0))\n else:\n raise ValueError('unknown model name: ' + name)",
"def load_model(model_name):\n if hasattr(torchvision.models, model_name):\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n try:\n import pretrainedmodels\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")",
"def get_model(name):\n # Evil reflection\n model_name = name.lower()\n model_module = importlib.import_module('.'+model_name, cfg.model_pck)\n [(_, model_class)] = inspect.getmembers(\n model_module,\n lambda c: inspect.isclass(c) and sys.modules[c.__module__] == model_module)\n\n tf.logging.debug('Found class %s', model_class)\n return model_class",
"def _get_model_and_layer(self, model_name, layer, model_path):\n if model_name == \"resnet-18\":\n model = models.resnet18()\n model.load_state_dict(torch.load(model_path))\n if layer == \"default\":\n layer = model._modules.get(\"avgpool\")\n self.layer_output_size = 512\n else:\n layer = model._modules.get(layer)\n\n return model, layer\n else:\n raise KeyError(\"Model %s was not found\" % model_name)\n \"\"\"\n elif model_name == 'alexnet':\n model = models.alexnet(pretrained=True)\n if layer == 'default':\n layer = model.classifier[-2]\n self.layer_output_size = 4096\n else:\n layer = model.classifier[-layer]\n\n return model, layer\n \"\"\"",
"def get_model(model_name: str, map_location=torch.device('cpu')):\n # model urls on Zenodo\n model_urls = {'ParallelNets': 'https://zenodo.org/record/7245516/files/ParallelNets.pth?download=1',\n 'UNetPath': 'https://zenodo.org/record/7245516/files/UNetPath.pth?download=1'}\n\n # check if model_name is supported\n if model_name not in ['ParallelNets', 'UNetPath']:\n raise ValueError(\"Model name needs to be 'ParallelNets' or 'UNetPath'.\")\n\n model_path = pkg_resources.resource_filename('crackpy', f'crack_detection/models/{model_name}.pth')\n\n # check if model folder exists\n origin, _ = os.path.split(model_path)\n if not os.path.exists(origin):\n os.makedirs(origin)\n\n if not os.path.exists(model_path):\n print(f\"Downloading {model_name}...\")\n torch.hub.download_url_to_file(model_urls[model_name], model_path)\n\n if model_name == 'ParallelNets':\n model = ParallelNets(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n else: # model_name == 'UNetPath'\n model = UNet(in_ch=2, out_ch=1, init_features=64)\n model.load_state_dict(torch.load(model_path, map_location=map_location))\n\n return model",
"def get_model():\n SUPPORTED_DATASETS = ('imagenet', 'cifar10', 'mnist')\n\n # ensure the dataset is supported\n dataset = args.dataset.lower()\n if dataset not in SUPPORTED_DATASETS:\n raise ValueError('Dataset {} is not supported'.format(dataset))\n net = None\n cadene = None\n\n if args.dataset == 'cifar10':\n if args.model == \"mobilenet\":\n from models.mobilenet import MobileNet\n net = MobileNet(n_class=10)\n\n elif args.model == \"alexnet\":\n from models.alexnet import AlexNet\n net = AlexNet(n_class=10)\n # else:\n # net = _create_cifar10_model(arch, pretrained)\n\n elif args.dataset == 'imagenet':\n if args.model ==\"mobilenet\":\n from models.mobilenet import MobileNet\n net = MobileNet(n_class=1000)\n # else:\n # net, cadene = _create_imagenet_model(arch, pretrained)\n\n # elif args.dataset == 'mnist':\n # net = _create_mnist_model(arch, pretrained)\n\n if net is None:\n raise NotImplementedError\n \n return net.cuda() if use_cuda else net",
"def get_SF_model(model_type):\n if model_type == \"BiLSTM\":\n return keras.models.load_model(SF_BiLSTM_model_path)\n elif model_type == \"MLP\":\n SF_model = SF_module().to(DEVICE)\n SF_model.load_state_dict(torch.load(SF_MLP_model_path))\n return SF_model",
"def get_model(name, **kwargs):\n models = {'standard_lstm_lm_200' : standard_lstm_lm_200,\n 'standard_lstm_lm_650' : standard_lstm_lm_650,\n 'standard_lstm_lm_1500': standard_lstm_lm_1500,\n 'awd_lstm_lm_1150': awd_lstm_lm_1150,\n 'awd_lstm_lm_600': awd_lstm_lm_600,\n 'big_rnn_lm_2048_512': big_rnn_lm_2048_512,\n 'elmo_2x1024_128_2048cnn_1xhighway': elmo_2x1024_128_2048cnn_1xhighway,\n 'elmo_2x2048_256_2048cnn_1xhighway': elmo_2x2048_256_2048cnn_1xhighway,\n 'elmo_2x4096_512_2048cnn_2xhighway': elmo_2x4096_512_2048cnn_2xhighway,\n 'transformer_en_de_512': transformer_en_de_512,\n 'bert_12_768_12' : bert_12_768_12,\n 'bert_24_1024_16' : bert_24_1024_16,\n 'distilbert_6_768_12' : distilbert_6_768_12,\n 'roberta_12_768_12' : roberta_12_768_12,\n 'roberta_24_1024_16' : roberta_24_1024_16,\n 'ernie_12_768_12' : ernie_12_768_12}\n name = name.lower()\n if name not in models:\n raise ValueError(\n 'Model %s is not supported. Available options are\\n\\t%s'%(\n name, '\\n\\t'.join(sorted(models.keys()))))\n return models[name](**kwargs)",
"def _get_model_by_name(self):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData']\n col_headers = ['model_name']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Initialize the persistent model\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the model from cache or disk\n self._get_model()\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(3)",
"def buildModel(model_name):\n if model_name == \"resnet50\":\n model = kapp.resnet50.ResNet50(weights=\"imagenet\", include_top=False)\n return model, kapp.resnet50.preprocess_input\n elif model_name == \"vgg16\":\n model = kapp.vgg16.VGG16(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg16.preprocess_input\n elif model_name == 'xception':\n model = kapp.xception.Xception(weights=\"imagenet\", include_top=False)\n return model, kapp.xception.preprocess_input\n elif model_name == 'vgg19':\n model = kapp.vgg19.VGG19(weights=\"imagenet\", include_top=False)\n return model, kapp.vgg19.preprocess_input\n elif model_name == 'inceptionv3':\n model = kapp.inception_v3.InceptionV3(weights=\"imagenet\", include_top=False)\n return model, kapp.inception_v3.preprocess_input\n elif model_name == 'mobilenet':\n model = kapp.mobilenet.MobileNet(weights=\"imagenet\", include_top=False)\n return model, kapp.mobilenet.preprocess_input\n else:\n raise Exception(\"Unsupported model error\")",
"def get_model(*, name: str) -> typing.Optional[typing.Type]:\n return getattr(open_alchemy.models, name, None)",
"def load_torchvision(model_name):\n # Lazy import as torchvision may not be required.\n import torchvision\n\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n\n if model_name.startswith(\"googlenet\"):\n model = getattr(torchvision.models, model_name)(pretrained=True, aux_logits=True)\n else:\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]",
"def load_model(name, input_node):\n # Find the model class from its name\n all_models = models.get_models()\n net_class = [model for model in all_models if model.__name__ == name][0]\n\n # Construct and return the model\n return net_class({'data': input_node})",
"def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return recurrent_model(model(*args), **kwargs)\n\n return wrapper",
"def get_model(model_type, sess, process_size, \n n_classes, training=False):\n x_dims = [process_size, process_size, 3]\n argin = {'sess': sess, \n 'x_dims': x_dims, \n 'n_classes': n_classes}\n if training:\n # If training, return a class\n if model_type == 'densenet':\n model = densenet_t\n if model_type == 'densenet_s':\n model = densenet_s_t\n if model_type == 'fcn8s':\n model = fcn8s_t\n if model_type == 'fcn8s_s':\n model = fcn8s_s_t\n if model_type == 'unet':\n model = unet_t\n if model_type == 'unet_s':\n model = unet_s_t\n\n else:\n\n if model_type == 'densenet':\n model = densenet(**argin)\n if model_type == 'densenet_s':\n model = densenet_s(**argin)\n if model_type == 'fcn8s':\n model = fcn8s(**argin)\n if model_type == 'fcn8s_s':\n model = fcn8s_s(**argin)\n if model_type == 'unet':\n model = unet(**argin)\n if model_type == 'unet_s':\n model = unet_s(**argin)\n\n return model",
"def get_model(args, num_classes):\n data_size = 224\n image = nn.Variable([1, 3, data_size, data_size])\n pimage = image_preprocess(image)\n pred, hidden = model_resnet.resnet_imagenet(\n pimage, num_classes, args.num_layers, args.shortcut_type, test=True, tiny=False)\n Model = namedtuple('Model', ['image', 'pred', 'hidden'])\n return Model(image, pred, hidden)",
"def loadModel(name, path=None):\n\n # if a path is given, try to load from that path first\n if path:\n try:\n model = TFT5ForConditionalGeneration.from_pretrained(path)\n tokenizer = T5Tokenizer.from_pretrained(path)\n \n return model, tokenizer\n except:\n print(f\"WARNING: Could not load the model from the path ({path}) specified with --from-pretrained flag. Trying to load '{name}' from cloud instead.\")\n\n # if no path was specified, or the load from path failed, try to load from cloud using the given model name\n model = TFT5ForConditionalGeneration.from_pretrained(name)\n tokenizer = T5Tokenizer.from_pretrained(name)\n \n return model, tokenizer",
"def get_model(name, dataset):\r\n field_dims = dataset.field_dims\r\n\r\n if name == 'ncf':\r\n # only supports MovieLens dataset because for other datasets user/item colums are indistinguishable\r\n assert isinstance(dataset, MovieLens1MDataset)\r\n return NeuralCollaborativeFiltering(field_dims, embed_dim=16, mlp_dims=(16, 16), dropout=0.2,\r\n user_field_idx=dataset.user_field_idx,\r\n item_field_idx=dataset.item_field_idx)\r\n else:\r\n raise ValueError('unknown model name: ' + name)",
"def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model",
"def get_model(model_name, model_config, to_cuda,\n uniform_initialize_bn_weight=False, forward_is_infer=False):\n model = None\n if model_name == 'Tacotron2':\n if forward_is_infer:\n class Tacotron2__forward_is_infer(Tacotron2):\n def forward(self, inputs, input_lengths):\n return self.infer(inputs, input_lengths)\n model = Tacotron2__forward_is_infer(**model_config)\n else:\n model = Tacotron2(**model_config)\n elif model_name == 'WaveGlow':\n if forward_is_infer:\n class WaveGlow__forward_is_infer(WaveGlow):\n def forward(self, spect, sigma=1.0):\n return self.infer(spect, sigma)\n model = WaveGlow__forward_is_infer(**model_config)\n else:\n model = WaveGlow(**model_config)\n else:\n raise NotImplementedError(model_name)\n\n if uniform_initialize_bn_weight:\n init_bn(model)\n\n if to_cuda:\n model = model.cuda()\n return model",
"def get_model(model_name):\n model = CNN().get_model(model_name=model_name)\n\n return model",
"def load(model, name=\"store/base\"):\n if torch.cuda.is_available():\n pretrained_dict = torch.load(name + \".pt\")\n else:\n pretrained_dict = torch.load(name + \".pt\", map_location=torch.device('cpu'))\n print(\"Loaded\", name + \" model.\")\n model_dict = model.state_dict()\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)",
"def load_model(path_model, model_type, device):\n if model_type == 'torch':\n model = torch.load(path_model).to(device)\n if hasattr(model, 'linblocks'):\n for linblock in model.linblocks:\n linblock.to(device)\n model.eval()\n return model\n elif model_type == 'sklearn':\n raise NotImplementedError\n else:\n raise Exception('Model type not known.')",
"def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model",
"def get_model_type(model_type, chosen_hparams,\n loss_form):\n if loss_form == \"MAE\":\n training_loss_object = tf.keras.losses.MeanAbsoluteError(\n name=\"training_loss\")\n self_supervised_loss_object = tf.keras.losses.MeanAbsoluteError(\n name=\"self_supervised_loss\")\n elif loss_form == \"MSE\":\n training_loss_object = tf.keras.losses.MeanSquaredError(\n name=\"training_loss\")\n self_supervised_loss_object = tf.keras.losses.MeanSquaredError(\n name=\"self_supervised_loss\")\n else:\n raise Exception(\"The loss type is not supported\")\n\n if model_type == \"lstm_seq2seq\":\n model = lstm_seq2seq.ForecastModel(\n loss_object=training_loss_object, hparams=chosen_hparams)\n elif model_type == \"lstm_seq2seq_saf\":\n model = lstm_seq2seq_saf.ForecastModel(\n loss_object=training_loss_object,\n self_supervised_loss_object=self_supervised_loss_object,\n hparams=chosen_hparams)\n elif model_type == \"tft\":\n model = tft.ForecastModel(\n loss_object=training_loss_object, hparams=chosen_hparams)\n elif model_type == \"tft_saf\":\n model = tft_saf.ForecastModel(\n loss_object=training_loss_object,\n self_supervised_loss_object=self_supervised_loss_object,\n hparams=chosen_hparams)\n else:\n raise Exception(\"The chosen model is not supported\")\n\n return model",
"def get_model(model=gin.REQUIRED):\n return model"
] | [
"0.6649939",
"0.6573367",
"0.6460272",
"0.6365849",
"0.6321241",
"0.62554383",
"0.6205857",
"0.609324",
"0.60782516",
"0.60180837",
"0.6016271",
"0.60053897",
"0.60016245",
"0.5934384",
"0.58993465",
"0.587728",
"0.5876873",
"0.5870805",
"0.58685386",
"0.5868329",
"0.5839506",
"0.5792236",
"0.5782371",
"0.5782003",
"0.57736677",
"0.5755802",
"0.5748447",
"0.5737292",
"0.5722687",
"0.57181364"
] | 0.8065929 | 0 |
Function for fetching and manipulating picons | def piconget(pid, mnpicon, picndir, piconslug, hxclr1, hxclr2, mangle=None,
colrful=False, brite=False):
if direxists(picndir):
urlbase = "http://images.pluto.tv/channels/"
if mnpicon:
urlend = 'solidLogoPNG.png'
else:
urlend = 'colorLogoPNG.png'
geturl = urlbase + "/" + pid + "/" + urlend
savename = picndir + piconslug + ".png"
if (not fileexists(savename, False)) or (overwritepicons):
_f = urllib.request.urlopen(geturl)
if colrful or brite or hxclr1:
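                # Derive gradient hue angles from the last two hex digits of the channel id,
                # or reuse the supplied hex colour when only hxclr1 is given.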
if colrful or brite:
hex1 = pid[-2:]
angle1 = hextoangle(hex1)
if angle1 - 60 <= 0:
angle2 = angle1 + 300
else:
angle2 = angle1 - 60
else:
hxclr2 = hxclr1
with Image() as canvas:
library.MagickSetSize(canvas.wand, 576, 576)
if CBRIGHT:
brpc = '100%'
sat = '100%'
else:
brpc = '30%'
sat = '50%'
if hxclr2 is not None:
grad = "gradient:" + hxclr1 + "-" + hxclr2
elif hxclr1 and angle1:
grad = "gradient:" + hxclr1 + "-hsb(" + str(angle1) + \
", 100%, " + str(brpc) + ")"
else:
grad = "gradient:hsb(" + str(angle1) + ", " + sat + ", " + \
str(brpc) + ")" + "-hsb(" + str(angle2) + ", " + sat + \
", " + str(brpc) + ")"
if mangle:
angle1 = mangle
canvas.options['gradient:angle'] = str(angle1)
canvas.pseudo(576, 576, grad)
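                    # Pad the downloaded logo to 576x576 on a transparent background and
                    # composite it over the gradient canvas before saving.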
with Image(file=_f) as img:
img.background_color = Color('transparent')
img.extent(width=576, height=576, x=0, y=-144)
img.composite(canvas, operator='dst_over', left=0, top=0)
img.save(filename=savename)
else:
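                # No colour options: just pad the logo to 576x576 on a transparent background.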
with Image(file=_f) as img:
img.background_color = Color('transparent')
img.extent(width=576, height=576, x=0, y=-144)
img.save(filename=savename)
_f.close()
else:
try:
os.mkdir(picndir)
except os.error:
print("Could not create " + picndir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _icons(self):",
"def icon(self):",
"def om_icons(self):\n icons = ({'path': 'misc_/DataTemplates/ic-xml.gif',\n 'alt': self.meta_type, 'title': self.meta_type},)\n if not self._v_cooked:\n self._cook()\n if self._v_errors:\n icons = icons + ({'path': 'misc_/PageTemplates/exclamation.gif',\n 'alt': 'Error',\n 'title': 'This template has an error'},)\n return icons",
"def _leadingIcons(self):",
"def _icons(self, c_brain):\n if c_brain.portal_type not in self.i_cache:\n icon_link = ''\n purl = api.portal.get_tool('portal_url')()\n typeInfo = api.portal.get_tool('portal_types')[c_brain.portal_type]\n if typeInfo.icon_expr:\n # we assume that stored icon_expr is like string:${portal_url}/myContentIcon.png\n # or like string:${portal_url}/++resource++imio.dashboard/dashboardpodtemplate.png\n contentIcon = '/'.join(typeInfo.icon_expr.split('/')[1:])\n title = translate(typeInfo.title, domain=typeInfo.i18n_domain, context=self.request)\n icon_link = u\"<img title='%s' src='%s/%s' />\" % (safe_unicode(escape(title)), purl, contentIcon)\n self.i_cache[c_brain.portal_type] = icon_link\n return self.i_cache[c_brain.portal_type]",
"def get_icon(self):\r\n raise NotImplementedError",
"def setIconImage(*args):",
"def GetIcon(old=False):\r\n\r\n # Imaris icons used with permission from Bitplane\r\n Icon8 = \"eJzsfQd4VNW2/yDSpCQhvTfSe++Z9J7pvfeSKZkkkw4kJKE3BVTs2EEBBRsqIkgRQXqV3nsHARFY/73PSRC83qved32+/3vs71vfnnrOKruv9VuHQulHGUhxdKSgOoBieJJC0VEolIAA8v189Plm9Fl8fO/7EAqlyZVCycsj34cXUCh+WRSKQtH7/RwKpWQMhdLZ2fv94H6US079KOHomo74uhTy898rUqX+d3/zW0UgMxF1TV2Lf421habS14TwxdIn/8w12MJarZjTOYNT+uWXZRmtDWaryC2/SOkbGFjyyO9oLHnvvZpGmevtg+3tHRSxLs+Nzz49WUwHYMScBlrCUQgIFCxOS5dvENdWD3j4/1y50bGyuHGr3KjQW+1Nz9iae4aq1Sd15tJbwKCsgGrKQuAEnYCkYVOgsqhpW2KsGbxctUfdXFRrEx2mrhJlL7/AjP7wp0LX9+8EuzffpHOEkQY67KJTPoQiyjzIp8wCcdx5yPf4FHJdXj1fY5x+LDGscU/a4DdAEbnvjjLv0F1e4r77XPfj4DSkHTKCFjbwI89APKUTwiltkOj9LPToATJit0LuqG+gKvXVm+PGrL6iit66xFJ88bKx+DLYkJw1aT+DC8UMcf4LvuClX7kTSLFDTPAH0GUEEDAvQn7+YcjP2QNF0XuhpnjH3SbBqUMN3KP7x8gBpjQAdCgBAgc/Db6DJp+S8HZPEBYCqJJvE7rLyziK/nsaWLRLUKe/BTPHAMxqhJ+n1QF0mwA6EX/TmwEyIz4H5/4Nd+r0C4pNE1PcaPFvzav2WwOVkfugKvkIVGScBWbeNZDT7oOJj3hmAqjLAfj5AKzsu5Cdsgc8Rhiu2fRrhfba18ymRn4xK24HMNzWASNgLzDiyeuUph1FdBhK0/dBUcouyI7eDrlR391Mj/xmZ6bPy1Zr7dzxje0Tlyi4n3zLSD4EEu+jIHTfBtX+26AgdDPkhq+HrJCvIDf04/crcqZ2CblCv9EbKP07vqA80kbLg3dx+PGnQDPqFIjd9kGF73eQF7QC8gI/gRy/uT3tX1D6SxTmqQZzPZ3LF/r+VjsWex16Vu52Clieu6DAfzXkBH4GuX7zbyV6a4l2qDPa3YSy2qliceswJov9yH+TAtqJOt15ul+h+0fNVN8PZlF936/Ndh43IMfn5X74O5ma7GsytfnPdK9/WQBI2oJo3I9oHMF36vfob1YiOoI/c0B1HKJmCuVe7//wOBOAKI/yx8aZx4Us5vqWB6/lyjGD/7vvrzO3hOgtzTadqWVxbaO9scZaX8cVCEP5QrEb/p7BFvxl9+ZLmieLZPYtUkXzSpm6/hWxyjTBam/gavX6QDaH68XmiUPpbMvgnHzef/zebH5DK5NrahfT8oN5GTue5uSdOcBOO3qt2G/+D4nJsmnmWm0gX8gdmUuVER2SWsj/U9c31TUn662NMou9lYrf29vGkvcV1CBq8OPLlAx25lYNHY3jbDQeM9G4iF/L0RiZ7vgyRIQqDhvNqsDEBPWLKSnij37vftW9duJKtKUcgWGh2mDqESlqms0NLR+0do5/9uHfChSqUlb+1dEWOxrvqwEk/qeBM3g7MPp9C6yki8APOAF+HgYIHSX6WiJteS46kgfJKRLjr+85ahSNqAMCyJpNH9NJz58M1GQ7KI1qEZsvVjI5UrZKb51R39pZam5oJn5HL/lm2uRpSNaUn4E/5Ac0365E9ClUUpZC2ZBvQZx6F+IGTYZIjxbITtPOYrGaIDpKDcF+unsB3tr1iF5D1IOoyd9LUxfsZWgtjJg5RVy4BBiRH0JF/Jy7JcHPb0x1nwmernoIDVa/KZLppIR+SmwB48YBGNHcIxi4E+iUL4j7VlAWQxnlPSgb9DVI0wCog96GdJc5kOgw7oxA3LSzumw0JEc1gLuTFjxG6ghycdRAvNNUqBj2BWhK1oA8ZxOwIpAOY5ddZHhugryhi8DN0QCZnm9CRqqqlpC9Eka3qgFEA/dCFeVjRO9BKWU+lFDeIdYORa4boSb+PuQ5LkNrh/mQ6fASFMR0fFxTMwM4lZNvJ4a3nHN+qgZGDNFC6rB5wHXaBqKIPWCkHbknSPkBBCkb73CjDt/njjoCVU+tg5GDzRDoMQ8yfGdvwveXi2GHPOgcIW8+5UXIo8wFKmUOZCHKpEwBdtEJqAm9Ablo3qL6fgX5bouB6v7mT3RG897pU9/+sGvylMkJHj23qU++BzyHH0DkehpquHvOKnKP3JLl7gZO/AEQJp4DSdxl4A49CF5PtsEAihUygr8CSsAT/bVCuI/1nonWSImUCYh6IIYyCSIpYyGEMgOmI9toMi9AVvRmyItAc2/w11Dg9ymURM47JuBMg5fmrtkxoWf5IXnQZpA7XkF2vHDLVnL7hib/JKjzzoAi+w4oqdfBhNYyipGXILT/ZBhIkUA8mvvjHcYzlVxAOl+B7tkJoyhNEECxgVf/WnAfrAGT9SeYhdY3dfrzkJ68E9LQOiQvZhNkh3wLeX4rgV38/AVJ1WJobth1t0wyOqXdtuJtC3fHJlP+1Tumsgv3mlHfbUdrKrz+apeivhVxFZIHvQSDKDLwc38J0p1ff0aK7l/R/2vwQTrxpNSAK0UF7pQWEEp/hmfQmmwiIi7zNOTlHYDMzN0EH9nx2yE7fCsUBG772cJZcdtasROaWBeW9LQcWVfXNmaOXXz0EL43XjPObAV4sRsAr9MMGQB5Tp8Q8j/1RBtkui38qqTk7jFp8g2IQ7YPH/gcpIfvBKMNYBwagxRZSDdF54BKPQ5FqB2Ulx+HqvKjUFV8ECqo+4GedhAkacd/tlUcuFlfdu6+hXXkfn35T9CO7t0mRnJrAcYiGoPadwuSf5IFXS9sDyE/pkzHdw7ohUsm9o15XPQbK6prc9C9E29AVdpJYt2ZmXoCMjJOQ2EhWjtWnwel6BIY5ZehRnIFGqS3YKIJ7o+V3L+OeL7ThfTdoybXslj3+N54bTqxFuDlyQBK+llwRDYegPQcOXjalSbrG3pV4tIU6ahDe9TBJ0HucwrYQUeAFovWeomIUg9BEV77ZpyEwqyzUJ5/EXj0q2BW3YZOZJvZ4wHemA3w+lR0ffR6TgfAM+1I343kPfHaeLSe1MNYJL+AcQ1Chs9E8hvAf2jr7Trde61NTS1Oz8LQ/mL6TD49cvEOvF6u8t8CFaP2QmXcD2i9ewAqUo5COeKhLPsMVOZeAnrhdRDTfwIz0lkXsu0kdL/xqB6L2hoeT+zo81pkBxMb2Z1OrrHxej4v+xJkBX0BQ580gJeT6bJF/XmbzWZzlGuZDh09LWFqizCRVTyuoTpo8S2697dAC9gOVSH7oToB83EIaCnHoTr9DFrHn4dqdC167jW0br8NnNw7iO4+IDYaT5lZPyG6hb6/Aczsa1CVdRG132OQFrUDgkeMBn9P7S6bYeU4odp/cJ3uY21X5ztPq7SGDKlaXC
4wJiNBqIJQ3EBl5FfBB5AGkgOZzxM9J4bDnmFrKor1em/orCjN9QRj1eed7vqCt8KPLmhIZHOmAv4JzKOmA/WKTB6zMkcG3ANQLjpclSDp73BOddGr6nNkg+zXXlRA3n43T4btUZ59PltFEpgdffa1GXJYH34alK1+yrnaK5b0I89ewxd4T/DaM8O9Kd+kDbH0UO4DXMHP899Rph2KuEoCKUwax3LZz6jYetRc3+KuWa79QFix68+68phkvefMt51uw5Vhz/xL9+qUrdr6i4pB/5gD6DtGBUXKK2mPZ6rnlR7kxlis+iHQnuS8GekO62Gim23yLRag0SBqwnHWxEkvXmbh0k2O8hP9jbrQP2A17fwTqIH/qoNmANJOp4QbxGA9wvxvhIGogjDfD4gVYDMawB8gBdDSSGkgbCbiIlgnwg6hZk0beRE0+9aNI95MnuQ5X5ABU5D1GdJ3loG3PUIOVUrgVnj5fAHGt5ZswdK9VfM1ulfoH5fpLrCTpca/275SkxrcvzkxwzeB8UsRdOkgSxLyLfOyeGeKcaPDv8ITLC7yM25BbVxFcxjOrqwa6cA7aINZEe5u2C/+f06mFEcW/ctxTGfcrEPgcOBq3Ef9XmGuX6rysL3sWSD7P6vrZo0aDpM2dYUf43qqyqZv71yQP0S0pK9ItLSg0Ixip1mSXfn3HS1Gne81+Z4ZWbPSEh3vvlLYmDlyKD9y8iHST2XyV0kKLRQTz5QaLNdiTZ7pJ0oOMHrAOuDZLIC7g20HpBvKZX0HoB6yCeNJBAGojV0UAc5QIeR4z3u0THJ3wg6AZSQ24iKfQXOt5CesQtyKPuICvmLhSxv1GPe4/08DsKUx8KH+Xxq0aOxyJpHTrnV567mlDzOHQ51uW5VYfnv4pnXY55nyuGLs9ajhlFMRIKoiS+eS/WLOq7MkOoHw/5A6nB9+lz3UV00E3qky8R/yfg6npYXOfH17rymm9es/4c1X8mvRrEfle8z4mxfjlsjBpga1Wxojb3p6+qi5di0deWvV546SX7qdO6LJuam5l/A+Jav6RUZcAgHRiWqFQmZeUV/Ruamp2mTJ3mM23GzMCyiioPVW3+qJTo0Xkxw146nOD+AdKHf40UxzVIsiA/4L3L+m/ozguP6YDqA9aBNidoddDdK+jogDXA4wbxo049poOnaSCRvIDnFpICrwsdpAT/TFogLwiWdJAWehuysDvIiLgDeeSvyIr+DTlxpIOEP8R9SNhb2WP5mli+LrpGw532yHxy7q3RiVstr3/H7ZP8ajlmfhk836JFroZnvn9WRhCQHvgQ2UF/QBb4gB4Tgu4hOfBXJNLniaTP6TfqAkaMOAEXD84Be8U1r7wW2tG8UfSAJv8YA0vj0u79rqxNyQMGlC2uyduyvLJoOeZ99o/ec+bNs500eYpFY9No9n8DtVptQJwz/4bkBcalarVpeWX1ANKHy5Su6T5dM2cFEP9uhSXqwNwCdZC6vjA4OWLs6EjnV6/HD/oAqUNWINH6ayRYUo3Qfy3lBSknpAzcJnJCPNUGrANtjZio0yvw3DzrgKHVQfzI40jS6kCjgQTvs0j00XiB7yMNMOLJExMDrmlwg/Tw85+0IOEOZKG/ITP0PrLIT7PDHwhvVWrijv2W71mmhZY73cdPxuzTeFVoeNWCuc0OenTM1vCcQTzznCxznRbwOx3v0/EeUv1/o+Nd4v4OfZbb4nPEEfdhflfg53We8v8xwf9g1/3wc94s1rzwfIAR1QE8BmBlUCH2PuV963i/MLv+6qnVBdv+XVO0Am2zg42ndU237ujoMKtvaDQsr6g0IL4NmXuVSmXE/JeVlZlXVldbU33oMrVrhs8MiX/XgmKVX35xaVBhqTq6sFgdVTY60zs+aPK8aOe3iM+PED/gc+EFCZbfI7n/OrGfHeuA9zdjL3i8RjwsesZ4tyOP5QTWgXb8SKuDJI0X8Hgy6yDB+zwSSQdJpAPWgtAAnRtGAmkhwf96txY4N+hqISX4toin9KC7IrbkIb8TD7+LY2boAwH2XC0yQx8KZAQ/fOyx4inI0sRuhgbMLSOTuGV+U/3vQ078MseyQIljLZhrGXEtE3zfIt5/ETrm98+5LoY+Y4jPBfh4nhL1H/Pv4XIAvm7bxNqXYJM34WTVIPjv90/eP79U7BfM+186DFTV1BRuX1JbtBr1LQ0OHZ2d/dvb203r6uoMy8vLDYh3Q+H7pcS/Wm2iKqsg/msl/qd1eRP/fuQHLvlFpT4FJRXhzH9BiYrv4RtBiC5QFSfFec7bGOP8Pvn/csSbfYVki9WiV+C9DFM5J1hTfThwl+gZE+z2kw4kL4h3lLyAdcBekDxEO4Z0ROiANZBA+Y7Hk1kHiV6nu71A6IDOia4O4nwv6+hA8gVJD5Iv8HlNp/Mrne9fH+NAC4mbe/8jcMzy72lj90ludfnl/59G7yPFn7wpgHTp/7MAv0etj0nvnT4H1bxc+/K4CI+b+1KdxP2fs8dhaa3X4O1i7QOvfeW9aMnrRR/4aM/jMjjbqwJrSrYtrC9ei9rq2SPGtLdbNjc3G1dW1bL36wvuS0oMi0tLOf5N1GXl5kW13iZtY6YP75zaNoT49yE9OOcWFnsWqcqiKQ9Ekw9E5haVRtAxjrwgraCoaXyO7OWLicPfR7TZh6QBygnm3yDFfC2SLDeIfS2TBmwR+xsKHdjt1ejgULcXJIlxA0kHj8YS6fkIyQsSRp4UOmANpNC5iH9CBwxtbtDqQNQI3edU0oF0zn8hbm51HyVNSODnWp60nOki7Ynn2t/R/j0tr7rcSrje7U2P9HmlO49xbcPvn+scqd45J/ogaYz0BIKoV/KmHDl06EGq/w6K+Gf+ed8PXvsYbLoEbtQLaPavFntE21urxfWfdeqtk+uLN6CualFkfWOjBcW+EcW+flGxivs9A4Ih9YCGecWJBm11n+ePa/gRnW1rMKdr/YapM1vcauoaBikLioYXq8rjKfajKe4jyQ8YceQHGQUFnctKC944mkN+neiwAdFGnyKBepMUs5VIMluDVIv1QgOpVluk/S6tdz4aPyIvSHI4LHmByxGkkBcIHZAXpGo0kDj86GM60HpBMuUErQ4kXOj2hOQnaoR4HT9IFTr4ayT+BXR/JkHDZ4IOr1pu43W4lbR4SeJW6FOHXw3H3PNK4x88DnYCMb7HEUmI8DmGcJ8jCPc+jACv/Rg1cj+GeRwQuX+I2x6qAXfC3/UnBPOaeMsP4GM6Hy4WzcIHmH/ngRViL6Aa1baJdapNqC9fml9RVWNeUVHBMd8vv6Cwb35+fr+CggL9gkKVYVPVh/Et1evQUvPh7bFjXp7XNfXNFxfO+eG96tpSG0V+wRDiP4HjnzQQRbEflVesSswvKc1QlU1dV5Dz+rFseu+8F0bqoK2INPy6WwNppIEU83WQWf6EFMvNSBuwTWggdaCkgVTSgPACHksedFRogMEaYC94TAekgRTKCcIPPP9rHWi1IOGKQLzm+DQ87TXmUBfMpxZS7nkUt1qIMQwNmFuGNMZxkl4/2c1xDHEcTRxH+B4mrg+JOd5
wnwMI895PvO+n414BvxHSGpih7rsF754U+z6DtyLQbSOCeC001V9B5m/B23Qm3C3HwLl/I1wH1qU72Mn1ast2VJMHoLFq+bhSNfl7cYlBXn5B39zc/D7K3Ly+uXn5/UqKG42aytehTrXsSl1LbWxz+/jUzqldWfMWLKqb1Pl2YE5eoXuRuiKR4j1G4wF8TMlTKVOramf9lhqxfHuO/2ko/Y4gc9geyMwPI8pgDRJNv0Cy6ddIM/1eaCDNYqPQQKrVtu6x5CSbvUixlTTAXsAaeNILeL8UKS/Qa+QFWh2kajSgq4N4bZ3IWvDV5AfNkXn67+CxGNWJVS3EOJXfmW5exRim70nNeOYJnXnPx/mN8j5Izw8gctQBRI3i4z5x3bKW57CRe8W17EEj9og18byei9dD81oXT+KbOfcdsoWOm+Hn/hPF/zqJf9uvEGz1b7EntJf59IdD+7ev1a7/rS/fKasv346m6i9fzMsvMaOYN8hR5vbJzsnpnaVQ9BY/U7ZxV3XBV3fK68vDa+qboupHt8S2j5+YMmvuvKzpMxbnyTJTHTT8x5IHMPecC2QF+RM66lunXIn33Hc7K+gIcgKPI4veO++PJzOiz9hvPZJNVnZrIN1svdgzV2ax5XENDNwnNJBqd1DoINmB+HY81q0DrRewDlJJA7pekEJe0O0HnlK/oO0ZRN/g8wgxGug+1oLHH6UxyEecaq9h0PKq5ZZ51XLLHh1FHh1NYH6ZW238avnlYyivVyAEE8fMbzDxG6LhmNew8Lo2X7G2basGm+HvsUmDjQJ+7hsQ4LYege7Eu8uPCOQ9YAetQojdFwiz/gCB/ZfAt//sXz1tm3q52kv3oKsp3+pDGkBr3Zp1MnmUMfFuIM/M6pORmdnLO1rv2brSdS01+RtQVjopv7yqNqy8uja0ur4xsqW9PX76rLnpc+a/kJtXqBpWrC5PIv7jCXHEPWshq1w1Z0WR4sOjifQZkylHZdE5S6EcpaQ6Lov4lBlTnWdAPYDRGshMvyENrBUakJtteqSB/jspH+xGGmkg2WY/6eDQIw3oeEGq+9FuDUh54ZEXxGvA/YIW2vEkPmqh5fBJ6MarLq8ctzGEyFEUu1oQx8wvx2+oDr8cv8wvx3DI8EccM79ajn21axc1HDO3vm6b/sRtgBtx676WOP5BwM9ljUCA83eEVQhmOH2LEMevEGr/OUJtPkKw9Vu8d/4k5tzXelr3vfDqqlaa1FfsIv5/hJ7dP3rIs7L00+XyPn379v1HTfEGdXXBDqjzXmgsKa8KLi2rDKZ+P7Sytj68qaUtpmvGLNlLr76WW15Vn1RUWpFYVFrGGkgkD0gtUOfI6upfuJ3it+9sIp2LRMpR2eSNWZ5HIB9yCJlU08spx8uM9yJJf6vQQLrJaqGBDLONj3zAcvtTNMBeQHnf/ki3FyS7HhUa6M4HGh1wbcCI08wvacFjzDFeEuJGHRHHKDp2w1v3+aHu2NX1ZuY3gnSt5ThEJ4aDyadDNT4d8Fgc734sjnlNHvPMfs0c++hw7O+6ViCIuGUEEr/MbYiG3yCnlQgd9DXxvAJhDl9IXNt/ijC7jxBqtxTBtu/tCrV5Z0GI7aKn7vvdrQGK/4bKnaireCE/WRbaT3yvZNu86oI9KM1ZMqWkTB1Uoq4IJf5DVRVV4WXVdZF1Tc1xk6d1yV59fXFxbcPoNKr/UqgHTCX+U6jvyygqnNRRW/nWxWQep+FzR587k3KnnGJORn2KkvjJJO6yqe/LMNuHVMPdSDH+DhkmP5AXrBMakGs0kKHVwIBdYh/0NJ18kKajgVSnY4/Vh9wv8viRdgxJO8cUp5l7ZkR7HngMzKcWzDNzq5uDw6nODiUthxGYY47lQE0sa3n2pc8a4LGrm2eJ48d51sYyx7GfThwHOzFWI9CR43cl8f01wl0ojp1WINT5M0Q4f4Jwp2WIGLQU4YPePxPm+O7qUMc3XwtzfnVyrOesieTxw30tR/f4O76f/Kot2/ZrrfowGtTf3asrXfteddGu3yrztj0syZ9ZVqxWR1BujyT+o4n/2NKK6iR1VW0S1QHx4zomJr74yquZtaMbIkrKqgpEv1eiSi9Ulyrqal7ZmRmz6UwC+WWyD5/v/UijWMqiHJo2mPgfcQbZFK8KiuMsqvfTzbdLsU/8y43XIctkIzJMNz+mAb7GQFcD2nyg6wXcJ/A4svZ6g2i3Q4j1kBA/lHV4EDHDiGt6HDv8IKJGHOg+RtF7jNZAl2fmmGNZG8+Bg/cSt3u7eZY8e7tOXn6ca4nndSKWmWvOy+zVzHM3187fCJ6jPFYgdtjn9B6XI2bwB4hwff9UhPM7yyOc35oa7vx6cbjr7IBIvxRD5m0e+jyTVeXYJzUj3jhNluWQU6D2+p/wrv2qUW9YVlG0BxWFh1FZsAvq/K+/pZhPI975HrMRxGlQTmHBKHl2skdOTn1QTf3Y5NqGlrKOKTPGvvjakvntY19cmascX0I/m0fIKSwaXdVQ+9YNzq/Mf7wPr7faj/hh+5A56hzSKO6yKB8rhx5HjvNxZNuQDwyg/GCxAwriPMv4JwG+vuRpGpD1J68YsOeJfCDVhtp5RUa00wHEOB0SiHKl524HBCKoBolwPygdCeFD9wvw+lAtgggBHow9AoGPcf24dzN8XB/3b669/agGC9Lwrct1IHl2mPOXhC8Q6f4Zoj0+RJT70ntRg9/6PsJ18fRItxcUCf7jBjE3N4nj7ErnXsmpqfpJSWn6Sckp/ZJSJCSnpBqkpKaZpMpkFvIshUduUaXt/4b/CvXMiOqi1T+UFSxfXUz+nVeS559VkOCRmp7kXlzcnlmU8eqc+uYpLzaN6doyacbsL8ZPnsH7JX84afrcz+e//PrKrjnzviiQv/BhUUlnfWFFkry+5u0T2fHfn5K4P9LNfzj1pzKqudM9jyGD+vac4aeRw/sdO5yUNNB/v7jGLMd0O7JNtkBhsg1y063INNsmkG5Bzy13kgZ2dWtAjBPwvTK044YO+zXzilR7DaJYdtonEOG8XyDSZZ9AmOv+xxDsTj7uSh7vRl5O7zPIbZfYP0BA499+7pu745r59qX+OuAJvrXxrc3VXH8HOHKe/pK4/gJRbsspppciyuW9fdHu78wJc1okNuQurIvxk+clOKSkppsWFhYbUx9mSDW4fqoslXnWZ/6Tk1OZf33BPx1T0tKMCKay9IwBykLV3+b4v/tqLP8+vjz7py8K5V/NLcj4eF5F0X8+qap8eXn7xNlfl2V8ebEk+/WdYyd1fdrU3vlDx7RZn46fMuNdev7VuMnTP+3omrVs8sw5X6vzF1+oLvtwfmPV57dGN7x9nP00gWpjRhzVw+z/ERRPsRRX6VR/p1Nuzh1xAcrBp5HvcQI5tqeg7H8cuZYHkW22BzkmO7qRZSYhg/whnaDVgLgvDtWFQgOaMUPtdSaxDMc9iBtE+ZsQ7bRHIMJFA+fdYj+AEJddAqFuOxDiKu0XEcQgrrn25phmaLn2d90guNZ6uV93fK/prruDBn0jau9AqseiBn+GhFHLED/qvZtR7kteinB6Lf
7J859XWNJPocyLkMkybKj+Nlar1SbKvDzD9HQ5cyxxn5pqoKsBfk5xb0KvmxH/tgUlFd7/W/75i7g+0dj46pnmltfO1ta8sn/C1OnL2ztnf5IXsQbqmtpZYyd3fdEyfspe4nsF4TPCtxOmzvyydeLkDydMm7myXD37mDpv2Z3R9f/6OTV4//4oqoE57uN8uK7aR7G/k/LsfnHuU6n+SqV8nDviEvKG8D3gT4m1pnnWpAWrY8gxP4hc071QmuwRyKbHDIX5XmSQP8gt9iCz2wN2iTECnjvguUSeU46334UYh90CrIFYx12kgd0Id94pwWWHOAa7bNPBFgEeM33E8ePQ5TtAU6dJkDxd22/x/Zei3D9AnNe/NiVGdd5LS6uYlVMW5ZelVDw1P+fkF3pkKnI8U9NklkXFJabl5eWmCoXSMC093YD4NWCf1+Wfj+T7RsS/Gf2ORUam3KVYXe/0f+GfuJxR3zLhQF3TrG3jpkz5gmJ8ZY36ne3Fsn9drGpoXkTx/k3LuGnHifevO2fM+WbSzLkbJ06fvX5i1+wN5AOrx06evLq0cNob6ZFrFyfytbtUK3Hcx3DPSzEv49rf9zxSeY6G6vF06s0Uw0+iYPh55LufRaHzaRTankGB1SnkWRxDgdkh5JnuR67JPqEFLf9yLUgDUh7Y2X1/LNYAX1cQZ7dTINp+B2lgp0CE4w6qobchjOG8tRu8PxDvHShhvcbDJQQ6r9Xg++66vJtvTX2uy3mo3QcIdXjrWMzIF9tS5AE28nx/m/Ssih1pmRV3s3Nzoojj6Oyc3D/dgy+noDCC8rc7xbFZXV2dhUqlMs3Mzn6cf10NcOynyUzSZOkW9DMDlAWq2P8L9xn5FXpTZs3u1dbZdXD0uCkXiOuNo8eN26ZOX/5Ape74srZl3NutHZNXEc8b2yZ2HaUcsGnSjLlrSAfrSA9rWydM/qm2ceam3Ohvzor1Hb7HEUr1MSOO+uh0/6tI872IdO/zlP/PQUa1X6rHEcg9iOehl1E0+DyKXE+jyO4siqzIDyxOIdfsGPJNDwsNZGugeIJ7Lf8yzb3S4gl8fVGs7TbE2m0lDWxHtMNW4n87IgiRg7ZS37SZdLBJQOwN5kT523ndY1zrxnfQoO80x1UaX6eezOHLbs651w62eZ/+3iufxAY3huaNtjFNkcntiRdH4sdZrqj4KDWjHHKF6lXiP5YQo8greD41Q7oVX15RSR9Fbl6ULCPTtqioyLyhsckqv6DQhHK/IXFsSPwbEvcSJP5ZE0b0t80pX1jKMjLssnPLA/8v/PPXnEVNzzS1dbmNn9J1nnRwYkzHpHV5GTOXlFaXj61ubN9d3zphS2P7xK1NbR0bm8d37mnrnHppbOfUVa0dUye3T5wWwX8j3nHT5yleVHtTz897sCX5nkWK4P4yxb7Ev9zrLNKp98scfFT0a7x/SaH7ReL/LEocz6HA+jwKLE+LtQW8zoA1kG96sJv/p2mAuWckDNwmrjWMttlMGtiCGNtNiLLfhEj7jQLhjgzpXgK8P2SA0w/wH/Q94btuBGmg5Vw3znV55zG1IJt3EDLwzTH82bNzswcTh+5p6fJBxI0DHR3puWtGdvlc5l8mLz+Vk5+fSvxHZSmUQdrznltQ7Eme4MWx3NLSMqChocFSqcwzJi9gfxf8M9/dGiBw3k/PkPcnjfWnvzU0U1lr/3/ln7+mzJyjN3bSrF6U66dRTt8zpnPaqeZxkzc0j+tc3tA6fn5j+4T6cVOm506ds8BD9/cmzZ4jjsl26zq4vosddUB4f4r/5cf4533Nmf9MqvszhxwT/OcPuYQij0sodj0PlTNpgPgvtCAPMCc/MD0pacBEygUKs31/4l8b+3wdSRwhXlx7vpk08BNi7DYgghBpv554+xEh9j+IvWF5TkTw7Kjr6Y8gxswdVj3B+wr6/U8RZruM4/1hiM3ia0ED51nqngfiNoA4dxEaIMjSM11kmeopzD8jM6e0NVuZKxMayFG68u8oC4qSyPtdKfatOjs77dTl5RZZCgX18+lGFP9GlOeNNXW+0AAdjelvW8rlWdb0v2yylP/7uv9/81VUpPrL19Kt1w6Pp7oqhuq8FJ8zgn+Z3xXi/jIyfC4I/jM8pTWBWUNOIGPQceS7nYHK/TKKXS6INWSlNudRbHUBBeZnUWx2CoWmx6E0OyI0oDA78JT434Gk/hL/yYL/TYgn7pn/CMH/OgHmniH2BXb4jnSwuhuSt696gveV4h6KT8Z86MB3EWj98q8j7Ub38nJs7/7sqekZejnKfMP0LIUv8eJMHDmxFhKS5SPS5BWC/7TMsm1U6+UQ90mZ2TmxRaUVERT7vunyLJuOjg77cePG2ZD3m5L3m1BsG1OcGwv+dTRAmjCVZ2YNzMjMtpFlyOyylFVe6dml/y+o/299JVCsxXgcQKIX5XjinGM/w/si5f0LYg0wx372sBNizyWZ/Qmq/U6j1O0KylylNYRq2wso6X9ReECJ2RmqA04IDeSYSfznmO9/Kv8c+3xNIfMfLfjfgHDb9Rr+1z7GveD4r9DN/ZcI4pi3+0wT80vZ65l7+NpM8Q0cOOuZp33+9Myy6HR5uivlZWfycGd6PIi4X6HVAHlAZ05eYX6BqqKwWFUulysUw0rV5YNmzprtWlNT01+hVJrS75owzwzi3ERHAyb0miV5vj1pxjZbWRzjmfD5U9/H/19fSTbrXoxypngceZj4Pye4Tx91ieKe6j7K+/LhJ8X97fi+hqm2R1Ay+DrF/1WUuVxCqeOFbv4LzM93e4BS4wGsgafyP2Db4/zbbhT8R9n+2M3/f8k9c24vxTzzzjEfbPtpd33H3IdYvwrfgV3f/VfnICNHXSzLSHcSGiAPSJcn2VEemCnLKv8hPatsGeWAhJrmsV+pyysjZ8yeEzZn7ry4SVOm+OTlF1hQ/2/OfQD5vxl5gJmkgXShA/5eZk6OLXmGU7pcbk91n8//C07/J19xVm8PjKVaK3bwQer1TkImcr60/p+5Z9+Xux8X60Pz3c6hzOMGSl0f8a+yuSjFvzkdzR55gFYDSvODxP/+p8Z/nA7/4d38S7Ev9gF/Gv9/8nop5oNtl0vzpiLu30aQ9WL4W8/FcOtxXoNty/7y8yelFellprQ+J1eq6on/bg0QPOixB+WFoQkJcQ4NrZ37P/rks731TS1x07qmh1fXN4aWqMviM+SZ/elnrUgDFqK/k0ka4F6fOO+vzCtwptzhSLEfmpHT0vv/IbX/7a+UgT98HeO4TVybJRtxqhvpGu55fXCBO+X6wb8I/it0+C+zv0D5n2pCi0vCA5j/PDOpH1SaH0Ym8Z9pceCp8c/8xxHv2tgPf4x/qu3sqW//y5j/6gnuPxbcc74Ptl6C4AEvwcd6yvn/zudPlRfqZSmqTDOV6plp6Sl2rAOKZSfK604Uu4NS05MGTJu+8Hp1wwRMnDL7Rv3oMbW5hcV+xK1nQbEqTaHMcycNDCANWNHvsQbMWQv/X3vXGtzEdYXlOjPu9I+xMbaklSz5BabgiQPBmCAj2cYpuBQwtmTLkiXZlt/GDzABA
kRi2pKkQF1ok5KWxhNKUjJpSaZAaEkai+HZ8kqbx7SdBpQBJoFQakpplMH49Nx7d1e7C4EkmGf3eM5cfV5p5+797nfuufu4W1nlNFe7PdnYD9K9/s67TvvEJqR2aop1z5lnowZLM9+n1+ArHvwAuUcfi55+nK4x2pBzEbm/AI3fPE/5b8g8C83pH1P9E/59oxj/HgX/1Tz/Du27lH/2nnSee/QSwj16Ic8/y/v6Kd+Ef+rpu5hfk3uW41uNr9C4T+6bKDD8DCxcLzxsDPR80XZwuFpjnN62pEp323bkcRxymoXz/gzM79IbmltwftACLm8n1DX2QFv3ss/auhZvxr5RiD4R+0IR5okTSV6IvyXzPDLPJ3O9TNR+psvXVOxtXx17K3m8WZtr2LVxTto79Hz8zPS/07leVc5pum5x04OXqPYbx2I/yD4PjVmfyPgn8V/Qv5J/Qf/zdO+I/DP9s9hfItF+EV3zPXRD7lmet53leuat0bhv2Ey1P5X7KeQbnoIbH7Xc7M5ajd/veaCmrvsw+lanp627vKqpq6yi5TDJByvd86nXNi2EzkUrwq1djx/3N7V7sZ9YkP8pqPXx2AfM5PoOxgMD5vzpdkdFpqd+wU2d679dVsEduOQad5Ke4/M/9Cl9vwThnrzDSOC/eTTyP/ostGScgUazRP/JH1/FP3m/LtV+ynsS/o9K9H8Q+d/L+EfuybvhCs1vyXm/DvdE+yTuE+3bjC8i/5vAot+I+l9PYn/fV2kDu9NFS39rR2dt48IP3HVd4K7tgmpfB3NvB2DfAH/LY9Des/wccnulpm5+n9vjKXB5a6fZK51ZpA8Qx/EjE7fNGlaSbpGV6J+PsepWj7Fzh+gcv278OfDnXKC8U+6z/w0tY/4FTVn/hIaMT6AetU/493NnJPH/FD0PRPI/Uf987Gf8s7GfPD8yA7mfYdhP+WfaD6H234IC85tXc4/OuI/yT7gXcz7jy6L2LdxzMIVbC7mpj436qm1R5a7my/aK1q4lx/1tPYc9/gXg8nXTvuDydYKvYQHUNfUA9hFwuEi/6Drn8jbMddbU5pHzQw5ndbbXP9/hqX/8G1XezmHj6VZbkX6jy4ExunbMSZznn6HjPXGyThTVPsZ+fzrG/rQzovZrk8+K8z+mfcZ/ZcrfZGP/LMr/Iap9yr9xH+Nfov0C8y7q1+Ne0L6F5HxE+4YtyP0mer/sVMz7MPZvGq72sDubEpHzPzS1L3q/sX3JRdS72AdILHB6OqGqpoPkBhfqa59+2zHvCTfG/NF1jUsW1TcH0tx19w73ghXqNywi1+y9qafAn3WaxnviRPeUe177DcYzstxPyj+J/YT/Moz9ZZR/pv0ZPP9i7Kf8R7Uv8n8N7i2mnVdpn8Z+A8v7SOyfgnlfzricrw1XW5Q5amhpd7YUOWvaDhDOBe5JLlDhbIXyqrbBCmf7xQrHgk/Ly5ed9zX2HHZVrs7w1a+6q871fBmz6J5ZbE8+CF5tGOo5nOPhPI/wTnQv5V7I+2js5+f+n6/9I7z+o9ovTGU5X+E1uL+R9q3G34h5X4Ee8379s0T7RXmZS4e9PeaUs7ygyuUzOaqbV+F84e1K13xwuFk/cLjawYnzA1dt93r6vZq2Ya/D7baHjStnkmc9yHlcwi/ROeFccML91fM+pn0h7y+TzfuF2L8fpkv4J+97E7R/be53SrS/LTrfR+1buV/RvK+A+wUZ99fd6jbBeZ34ubljyaSmzsVzWzqW2jEfuCvn9zdjgH8Zplrz9ORNH1UlHqbXdn0jP4S6pFM856d5Pykf9wX+ee0T7mfpjtA1RcRxH51wX2ji3/cn4f1a3FsI96ks9hekvsa0T/I+7kXkvo/E/aN3ur3uR8tKc9NyknZVb+nIV6Eq4QhdU9CT+A/sC8h3Ulg83yud89Nxn9c+eS6I5vyUf6b96cY9IvcW0xvo1+FeMu4T7oW8z4Lan8a9gOW6v5A6juWfkVLt1ljC+GLtlJTe/tKEbWAfcRCcCX8GV+K7UDnyPXAm/ZWOE+TZEDt/399cet8n0z7lnjtAnXA/3bibcm/juRdcybvIvUT7hPsCzPmJ7h8x/OglUrfxaY13unnuezNkFNGSMznz8lLWvlmcuAXKE/rBkXAAKkYeAkfSUZhH7vtOPgpzUo5QL9X+Cb6t+yN8S3+Qcl9s2Eu5txrZO5+l3Ec9GvMt4pgvne+9ROf5+dzqeXe4Sf4vzZDObl/UZs8yTdB+b4Ml+SfwaOJWKB35e/hOUghmj9pDfVbKPpip3Q+l+r3UH+V2Uy9C3ouMb4A19Xeo69fp885Wid7Z/3aImrcaXxXjPbuut773DjeBagrLNrfOnqD7/guTU9aBLWkzFI96BUqSX4Pi5NdhespOKNHtgCI98srtoOsb2AzbMZb/lpaEZ/JZcJuRcU5yvALuZX6M//kJi+7H9/6k6j41Lq1E/Jya6s7O4ZZ2Y87468m6tR/la9fDVO3zYE35JVi0m+naBtN0mLvrt4AN+SVOPlMn+TzO5ZHvS4/on91m0T0z/yHjQrqmcV7aU3fs+FT7cmY2l8lwamq1Kce4sGgCt6IuT79q2WTu6eAk/ZqV+fofBvP1a4KT9T9YPJl70p5rWJ6rGR8rXivNMbff9rqrpppqqqmmmmqqqaaaaqqppppqqqmmmmqqqaba7Ta49y0YkMFB6wkZHrDKv74mXo7zY2RwqEsj219EE+iT7Y78XPKNcCAcgP4o3jOkiQdJDY5FNDFwTPguQF8YG/0Aw/hNsAY1GivDn02IA3gCSTHtYdsnxsLQcsTxu9nPl30IQxHEcUGKg4EQXBlAHEubYEgTyA1cCRMcInhQsyISuBxCHBOysiMZtF4OEtZDJoq7plkv014QJm0woLHFWf9LsU+DNQlrbLH9/6HYzDpLMObEAP2QK3SfExFajBC7E9suLosRCNPi6wr8gICtShyihXi7tUmJafU1MZqgHMfyOJ4vVyqwJsRjHtp8rBTXSc2XY7NGsf1zcPQZvFwFXqLAETmOidhoIW7PV+CEkAJr5HiEAvO7DSqwcrsCa2wKbL4BzlVg5cJD0xTYqqgQT3gURxR4SIEhqMBhBY4o8KAUxmDHV+KgDF9QbA/LcGz/MTnu65PhuL5+WYXj+wIyHNgow/GwUlo/VOF3IdqCcSRcBKNLyhCNh6Q/lmOrHMdRHI7u3ETDVdAn7I3iiGmAP1KGgVesJhZ6ZTgOnlTgfUJA5GszKMdDgywqDgrHttwkxxtY4B0ScDiWj4L80QzwgTjI4wg/OLFDDOAPrRA9RBAD/4DYWJIKSYL6IDu8qAltLViIUSHagNC4vA3JdkdroBhD9sC9av8D+gAhkQ==\"\r\n Icon7 = 
\"eJzsvQd0U8e2Pn7ARdJRsyXZluQiuffee+82YIyxMab33ntNCIGENJJQEkJICCRAAgkBQgKE3iH0TkLv1XQb2/r+e0bk/e677+a+3Nfu+7+F15o1kiydM7Nnl2/P7L2PIDQT7AQHB4F6s9DbVhDaCoJgNlvfL6HPN9JnEREv3vsKQk8nQcjIsL4PyBIEVbIgdO784v8fCkLcBEEIoGs4sOsI1s//6A/A/6rmvi1wnXdZf3R3e2x5vy0wvQQYEPMAhbqj8NL1OPhHvxNHujm7LAj/xOF141nvoLHvj4hotAxPuYPhyXfQM+QxirVHERDW57xmiHsxTVvLfuMnetq6TQ5t6fl5Qrn5h+Q61XjP7+h/zQMFmIdEP8asqkZ0CbyLVsZfkdpsHUI+LETa1WrErCut10z1vSof7HrXtCkJXj+kwHV25PnfxxJstySzjdM19Ik4gb7Rl1DldQWpws/QFybC40QCQi8UI+FMFcJXlkD1rq/FcVGwJXh3/lOXzj4LnN7yXWQYnfmkumgv2iQc4vOu9LiOaNePoPOOh9Nwb7h2pevkFMEYVQjXmRFQv+0Lx/mB9Z4b0+C6KQReA7pjSPw9TMyzoG8EMDoF6BX9FKmGNYjRfo5E16+RHvAjksK/gsePUae1y/zWaSb7nrMd6fCzcWLOlgzFdkuvwPt4uxXQ3vsB+kTexOhUoMb7Pop1J9BSfw5d/a+hg/kYvPuW3w7t2euUcXzaBofZpuMJuuVXyzX30SO4AWPTGzGK7t0poJZo0Yg2HpdRabqBNPkO6s+jbdIh6ObGQvt11HPNXs9L7m06Psy3uUb3eoZ5HYCBsQ14rdCC9yuAQbFNdN/zSFIuRIjwxochme8t0L0dDbsZLiHCp4IyRJgTm2m/826f0GcYFPcEfaNu4JXcBkzIasRUusarOXUIF5Ycd52ZtEXzvt9t50+DfnGcZuzJ1stfGOuTpPj+envzI7zVAviA+O698meo8j2MflG16OT1AKmui34Tp2jHGLaF9Dcuj96im+I3jP3Wx36oGC9feiDdYT1KDEfwVssnGJpQj3ntG9An7CbyNEcR3/wHaPom3Fd8q+7AfmNanvAsYH5al3+2rFmbwNtzkyBYqDVKBCGFhKDxhe5gfR21wfaCUEv/v5AuCJvThX/5XcCL72X8/0zPvGz/u5r+7dDSoGPZ0ExNQIbuAHIUp9He9zzGZT/EyNSHGBD3GGMzH6Nb2FW0cDuOYsNhZGu2IVycBYe4oJuClyD7j95bO9o3M3hf/lntDB8o2vsi1+FXS0+/OkzOsXBd8ElHYG41MC4dmJIPDI5tRGf/B8hQ7kaq6ju4BMT99h+5L/3ZiUPdxnltToNuij/sS7R1zdsLPXPs7m54JRmYVlKHEWTDRqXWYn7npxiZ8hxdg+6jS8A9dPS7jXjxOxQ6/4xg49DDgrNAKEBQ/8n72jjPCemjnxf+m9M34dBO9oNdjHK+osLgzf6fJjkypaW6FtPyGtDK9Tekyn9BpsNutDAeRbnHeVSYrqDa+w6yHLbDKJTD9EEYgjZmwzAlpNY+SbVdkDTrKXNThovhjmEyV0WK06SAVnTP9N/vH7A3d3vAxiwo3/CGcoK51sZTFvOX40uQrJtSormGd0qBz7uS7o97QHr4OSo9L6K1+1m0dD2JIpeDRP9NMIrFyDhZjvQ7HdAGI5F9uQvc5kTCvo8LlK96QTXaE8oRJvisTIXH6xHr1V3dF4SfaQGfPZnPfValwsYszftr+iTbHVjYybMOy/oAO6YAS/sCr9K3hsaDxnCJaH4Uudo9SJX+DLOxDTLuVyHiVAuEHChAR0xCDSYg+VIVzF8l0Bg8YdtZh8ArRQi71xLBP+VCLHPerf7I/7zrjPADf2t9MoWzMypc2To/Qe/Ic3il4AbdvwnjMiwYnwG0druMYpdjyHU8CG+hK5RjjXDYGAbng3EwHU6D+4lUhD5qiQL0o9YbSRfbwunzUKhm+Ta5bIt9lHSrGvr3Qxkj/Ju5206ShPhHjN2QLTtO674b0TZrECR8hmTVamRq16KasExLwkPJitVIUf4Af5thUIXr4TDXF0lPauC+OQEuW6MQf7wMUZtL4TM3E0FvFED+oTdkw92aFLN9HvvdLUThw96QV+v3ELTtYyM0by2E2beVjXf73G6UAwKcZqBGU4/hqXcwMP8aWgcfRp52P9Idfkax8ymU6s8gTrkEfqoRMEtq4Ch6QdneBR4jguHaOghuUYlwVxXATWgBT6EnPISO8D6YCZdlkVDN84N8jje8t6TBf1cWHPt6wtDPH+ZP4uD9dTJMi2PhNbcAbd+ZhyGvbUbrfqvQJecKuvg8Q43PA/QOr0O/iCa08TuHwIJRMId0g7EkD07dIuDYJ4DwWRTcX8uFO81Z/1o8DBMSYRiaBE1PT5CsQ9rfCOWb3lB/EYCoc62Qcr8DgvYXwP2nRBh3x8F5eTji41eikzdhlwRgSAjQL4TJANA/2oJuwQ8J2zVhYDRQZbiHdOVmRChnI9JxNqJU80kfzkSE7CMUexxBccBBRAfPQ2zCx3CZHgK3WZEwrIqB41fBUE4nms30uSNb43dB/p5Xo3ZGAGLPl8A4KgOJwga0Uj3DN8T3DIf1jST8GGbB4DgL4SmQPrJgeBLhM5LaCtM15Gh2IlG+EtkOu5Ch3ow01TokyVcRf+5CnuogKlyJVxt6wXdvFmTTTNek0zweyGsMv0pClO8p+rnPdNoaDe3yAOhGRdTFyVYgyu4z5KiO4LW8RizuaaH5gvTdI1R5XkP34Ad4Nd+CSTnAmDQaW0QDX5cCHc3X5TiKSDarPK/SZ3fo9TFUeB5DvHwF7Ke4XmlWoZ7q2NU5w+901lGfTalQdDFAMcQd0nGunwsThcwozRfnKrQPUOV+n7BuLfpFPsebLZoIR4L0bh35Ao/oHifQO+oc6WJGkwZqz8k/+I104ymUuZ0lnbiT49y2HndozBfxZsunCLdZCukb2ovaxb6n3Oakwm163m9OU7Lm619Pel1a47zUc1fYdH/ziL0F0nMo019FK5cb6BpQj/5RdZiUbcEMwqSM9v2igLakfyq9LnOa9AyrRzuv2yh3Z3rxHDr43kGh01FkajYhTbGL240pOU8QZbMWvj1qkFEwFwWey666KsvHetWUrncpij6tXuD53PRlFuIV3yJdthO56iNo6/aQ8DuzNc/Qg/D01EIQlgeGJTIcTnje/xkGRD+nMQEd/O4RnY8TzW+Sr3CX1vwQctSHkCLfjCLVecLsN5DV5Su4EuN4fV4A52/CG1XLvWE+lgzthhCoVnojyPwmsmyOkN09im4BzzCtAJjREvy+3YIayB9ooHUA5703ShktGjnt+5AcdvC9T3y3B90C65BN/BYsfR3Jsk1X02Q776XbHan3Gz8YPjXdIfvMDPkakv3V/nDaEg23TQmQL3e/oK8oPpshHEIrp9uoNj+jNX+ElUOsOn8BIfQRyU3Ec0/xWoGFfAJgfidgTrV1LD1CnhP/X6I134Ear1oECONgENK+ihCGRrcUHkSnCT+FOX7nc1U2zBniO
56r5W94vitONFXbDHJOFqoET4lUHRTabPrRZNlWtNARv/g8oXk9IX+RfMfIy5hVCbxeBFrje0SDevIrGoj367G4F/BFd9INcQ3IJ782XlyBCJs5Tw0lGUc1VQFP9NGp53z8+ryvH5mwwvWLSIukk/Plv2VnoqQfX0gVt6DU+RzR7xEmZjURrqkn3qpFjd+v3NZ92JZhnjpa+yeYVmyh/z/BsASSyx6Eh0gGkuUbECR5pc5FaBHp15j+nW5bBJSLvaF6yw3u+xOhG+/3OtkZyV/fO1ZctLCF02WUu9RiGPHU67Tm02iu04sbye+2Yq5il1/Rzuc0lvS28sNMon8n3yekfx/ho3Yg+/MzvG36HQkVXg1w2xD6hnlX2i337+Ia3JfGrTasiK5zGOp5hPnzf31vd1mrfnHyr1BmuIByQy0GRTdhEq0n8zPZGrN7/zSG9AvRPFL8BBV+h9E18AHR6B66+NajbyjQQnsevsLoA0IXQePyTfAa53eD19O95G4/xl+UxKlTNAuDD8pTtJP/8r4msVoTKI6ZFydfQrhxDZIdF6MF+bRd/B9jdlUDv2/fiOe01hasHk6Yk9a/2vMC2gcdILt7h+zxTrJ9J5Gt3Itw5UcW595R23WLAo45TvN7+/d7OO+LXURjORfyc95DZRtj5l/eP0L8sGey/CeS8aN03WuIk32HPKc96Bd9h+77K77o9ox46iHJ1CO8W/YEn3Z6jBpTLaaT/1zmdor77ZnKXYgQ5sLYOhcOKz0hH0XC+xf3cDoVN9B9UQxCv8mFrMq51T/br3jZ/te0f/03uRkJWzNYBjdDY60w6VGtIPSjZqLWi7XN/2/vQEItLUMQHtBn5yfTa+ueg5s9vbQIAhqFZtvqBBvUkpq5QC7QZsHm+l/fju1TRFDr/HKf4mV72V62/0PNf03GtdDjhRAXu8GvbAzy7c4RLjyCVMUOtCc/dWzWXfSLvY2B8ffQLfQ+RqXf4697R99ClnYzWrofR57zThTodyNDsw4x9gsRYB4LMU13t7kgqP67xk1/MtdPIhfFXCpv0nzgB+lQZ8T5rUSJ5AFa6+8R1nuACZmNmNkG3N9j2Jvtv40nVMN8ssn02ZD4x+SXPUBH/7socT6NTPUuxEtXwk8+EmK6FvYmm4D/lrH7ClGGOeFH4q5VQDXVG5IcDez7ymGO6Xm/XAbLsCir/zKtGFjYDXiT/JYvCbdvfQUYntiEcekWvFFi3VMcnsA+A/lKl5AoW0v+1CFkunwKjU/AQXcM+y8fu02uulozK7A+6moZ1G/4QlKggdjVALuW0ilRtotTimSPMCICWNKH+f6PMb30EdG7kXzAp/huMPBpR7Yf0US+4HPCpffJP7pPvulT7gfGypYiU7cC3b2fwk/ZZwetcfDfwuD/kabq4p6vnuK9Uf6mJ/xPEv6c6gNJuiPEdvodYqET38tKFH5Q5It37pfJgdmEPsdm1iJRsgd52l+QodqFTIft6BR8kPy2C+Qrn0CB02G0MPzKffYczT4kK1chQDIemuZxEPu7QTXRDPs2unM2/rKv7QWbETKhedQ/Om7bLrp03eygTQHbsqH9KAj6n2KgmxUE+0gV+Qi68r/+foq4eVu29AKqDXX4hHzNIQmPUeJyCUVOx8knIvzv8Av1h9E74iH5aFfROeAu+SdP+Z5Hmmojgpq/Cgcvd2Teq0FeXQ/E/laOgLWZUI8wwy5ZDYlZsV5mVo2QFzkPEnN0gyQSyVhFqcsH+pmhK2wEIfBf8fl0l+5e29MQfKiAsL4HZJM8+HmjJNPx3T+ab7Ls52MF6gvo5PUUH9LsPu/G/Pbn5Mfe4PsYPcMe0XocQKnhJLUTNIcrKHQ+iCL9QfLpfoKfMAK6aDNK6/oi+UY7lDYORE9MRzmGI+JYKVTv0Lr30kM2xBVO80OheysQiqHuUI4xw6Gv6Zl9mvpT4jmN/Xyjh9/BnHsVGIWA9VnQL4mEy8Jw2Ceqv/2jsTvZpcoyZcdvVuktmEn8s3YE8O0ga3urpQWvFTKdY8GIJPLNfC6Snj2MVPJDE8SVNPZ1SFKs4nuOpqFBaNUwBKE7C+C6NAbxJ9ugvGk4zWMaWjwdgIDdOVC87QX7kQYop5EfNckEsbcrYu5XIOKXEhjGBt0gGl8K3pIHl59j6sWlfk+yG3tAN8n/LvMz/2j8AdJxzhmyw0/KnGoxhSRiUY9Gvoe9ZTLwcY3Vj2b7JwNiGtE9hO2bHeN7+Wxfn8lIpsM2wsoT4VUdiyx0R259L4SeKUbgoTxaj/58b7sCI2kthqHgYU9E7SmF/uNwSIcbYVethfupNATdK0FiXXuE/1QASYm2TnAWhmk+DjzhuzkD9pmOC/+erPgKg8Us6ZmbFfqn6BXyHO292J7IQ9JBdzCnuomfx0zMttoBplvZHnmJ/jhau/2GXO1+Gv8WBEunQCPxhvwzM6RbA6DcGgr90QR4/5YN8/F0eNAYvS5lI/ZRJVphKNoSfxTe743oXS3g9WMKfHdnwmF/BMIelaHgVi+L06v+z6VvEu/X6C+Tz2H89+Q9Ubn2UKH6PMpcL5JuOYNY6QoEN19APPIj38+oMJ1H15CLeC2/ER18HqO1+wUkK37g+1fFTmcRJf8IuuZhkLfRQjmU+GKmF0znM6D+PgTiIh+ot4XCeCEFMY2ViKbRR6MN0tEJOc+6QP6BN8S3PEl3udY3m+N60etaDkoxEH4bM2GbpNz898Yt7+KSI47VfxtmerchV3YKqfItSJRuIJu6B4mKlYiQzEOkZDFixC/o9WfIVh1BK8MFLsPpDhsQK36JKHE+gsVXYbKpgjJMA/2CMBjXxqIjXkHChQo4rw6H36F0xOwtQtGaTgj9IAOunWLgkhUNfXwCtItpjuM9oBxualBMMN9W7A1FckNHtMMYqMaZLbZK+6lywS5O1MjNoiDVi4KdUV6hT9fOClwlvmGEY00M0oVf0EbViG7m5+jrW49RoUA3jzoUOx9BjnYX6ZlDSJB/i1zS9208rnK+j5d/gyibjxHcbAq87XvAYJ8Bpb0j1O1doXnbH16vx8CzUwQMqSEwe2TCTSiBUWgFD6GG9NVw+AqDYKbX6tXBULzhCYe5AXD5iea0K463nHtdEX2uDPKBbpAY5JAopU320ao621LHetkQI+STTbDr5winghSEuc5B6+ztmD7xKKa9sRf931qGNpM/R+/8G6hyvYOWxt9Qab5OcnsbHf0e8D3gYs0JhMZMgXtma5g9O8DDtQI6r3AovHSQhyshjZNDmeMKbetAuFTHwVCeBpe8BDjHxcHonQtPbUe4q1oj4HwhQk4WWfnoTTPEaSRDb5jg8HkAQg4XwudgFhwXkA2bEgyfz5IQvq0Isb+U0Xq2RPiuEgQcSYH3qUjkfDsafd7/DpWDN6CozRaUp5wk7PkMvUIbacwPCbPR67A6vo/enyxqz8AmFETsQmibyfCNHwpTXA1ch5DufCsHru/lwPBBKgwzU+A6Mwuen7SEeXEJPBYXwbicPl9NdF4dBufFQXDp7svP/MTXTJANMELW3xXKd72hnOUD88Zk5D7o
jlaNQ5BHei32Zlt478ggnoyCw4YwKL81w7tXLyR77UF7DTDUG+jnSePzB/oEW8c5NMHas3iWSvMtwkdP+FnIUMJvnQnztFReQpZ0D+JsliCi2RyEN38fkcJcRAizqX2IYOE1xAiLkSPbj46mx8jWbUWYbhbCAt5HSMSbhME0sKvQQtLBGeqJXnBZEQntijBovwuDx6YkeO5Kh9OmKMiW+MKmsxZ2r7vCc2cq/PZmQ7PBhCCPiSgWiNdlDZhGGPnrfuD4mZ0XsHOcvhFNGBgLPubRaU3oFvyYcJyF7+Wz84UeIdY4HYYj4hVLESl+RDZtNT/fYX2sfBGixQUk618h32k3KtyuId+BndfvRhXpYJfPI2D+Ih6+P6fDvDcV2u/DoJhFvDSI+H4gYcxxxlrpUp8rLkfj63RT/aGa7IWkw22Reon4dWoCgmWvIk22Ezn2dzA0sgFrR1rx89gMRnML8UwTeoc38X3SKfkWjMuw7sWPSLbOkZ1JdfJ/hFL9KdJLp0hvfYt09WbCeQeRpl5Hsr4P2ZqdZKt/RKr6JyQovuFnRrnanagJOkr2bTwK0ReGjbEQp5shm2GGpI++kfDGBcVo0/eiVJYo8ZQXiTM9dxrPp8J9dTzk4wxwGh+GONk3iLFdhHDZB0iS7EBfsl+LegA/jrL6An0jLfxMpcb3Ptr73OJYaFDcM7JpFn6+zObCzln6RzcSpnvC58HOtTLVO/hcWhjP0Dj30m/voJXraVR53iKbsZvs3zFkOrL1Wga3oylQLvKHpL/+nKyNyzDbt43T9OtjGj0HhG+Q+Csvy5I1jxXjTHBaRrqBvif/3OeCZInL+qCwyQ0ZzQ4j1/EA4Zn1yNccRfeAJnTyu4eZ5Q34oMLKHx3IJ2P3Z2fAbTwuULvC/bCx6exM0MLlg/kyHfzY9+4SZj1K9u4q8nX7OJ3ZfGp8b2EAzbHKfBuZiuN8X79z0K8ItfkCkrGu9+xaa98hjGOU+SgDPGZHTouvbYcQwiC62cEgHwBiT5Lr4a4X7ZIdKpjdChWmT0+33dtU7f6E/A7ws9OuAaRfQuqIN5owMcuC2e3YuSU7s2vk5zZ9Iuppbo+Q5rgGE3Iu8/F3D26gVk/8ZaHx1/KzPna2U+19Ey2J3sw+ZznuQBv3q4RLHiNLdYDm/yuW9aXxB/+KgGYfkh9lPK19xW+T+4qIq/qfgmDYFAaHFT7Xm3/hvFu63mOF8guftYQjNskrDAku75j6OJmTlyRLfkSR+ipqPB+jwOEsKlzvorPfM+LlBi6nDO+8U2Y9C2LvuwXVc5nt4Hcbqar1aEd2YEgci5NkvP+Y+OIGtw1tTdfIxv1COI/5Cdf4OVmp/jTZvhXcj8hSHqfr3cO6MST3oVdJN30F8VsjVMu8YXqnFIFl/eHp3XdPrOTTUYnCvO75wuw+JiGn2qZGfcBvT+YT/6PpCDJOR4btARTpTqFAewzZyqOoNj8lXPOIxvmY1vou8c2zf/GF2RowPmJjZWen7Oy2O62TVa828TNGJh9s7K1cz3Nb157wKTuPzHHciwqPazwOgOmidNV29A1/jPFpT5GvPIGE0GXwze6PgqCl6Ki5gg4uvzwwCEXdHGOC33CdkHBJ3yUaygpPOH4SAP2eELh1ao0UYRPHOamKLchQ7EUb411Uuj/gfNI3spF4oYFk8THJahM/e5yUDa4rRyZbdU530pfsvGRgbD2tVxOXaaZTW7tdIF4/ROO9TvbuHpdVdjaapdpPNDpCtP+Fn5FmyYmvBMLfMeeQ8s5kmLq3RPDbbeE3Px/GBRFNDnN9IM71hOlACpy3RNLaBEC3JRTq18IRa7sEadIdNP5tSJfvQjsT+eGEdUYnW8/GGc0HxLBz4yZub1ks7Vvk07CYTDaPt4mnRqZY+Dly54Cn9D0aOz/zbSC6n0Ua0YRhi56hdYT1DnHcFymbhwzxANLE3UgX9yDH7hyyAw4jeE5fJJm+g3SuKxSrfaDaFgjlz0FQbgiGx3HyH38thGlfChwZhv3SFQHBE5BBOC1bcQKt9bfJjhCfhDfivTKrr8X8lG8GWON/Gb07+T/B6NRGLgcM+zOd805r6xyYfDDdb+WpRvQh+1CgO0x6czuP1Wilv8LtlVnWnuzLpL2J4g9vJojfz00WN65KFbavMyxocVM6zwBdbirR2gvK1UFQbw6D6sdgOBIuMOyIB+lQKOaR3/ux6w2H6siDybY/W7LEk2jpdA1VHo/QybeO1vkO6Zmn3Nf6tJP1jJadFbPx9gp7RnzxDK/kWe0xiyf7uAPwCX2P6SbmVzLZ7uT/kJ83ZznsRopiA3oEMdx6DJ7Nu8LDrsV6e1GI9/QUZGWmk+os8QvtOOGZxPVczBzdzCBIR+ggvu8F+Uyv5/Lp5pPiZI9vpBPcZ9gOcxnQfKBTG7vejrFCsuAQKkz9PlG2xpKjOIV2bs9R5f4IncnWdAt+xM85hyc9wxslz/FZF+DDKvAxM33T1nSL5PQ5x2yjaC0+rLTgq97AEtJ/i3pa16GD70Mus0nyteTTrEUa8WWw5DUY/DLqvE3tb4lOiufhDqNPF9ldeCPO/vMJHpp2IzzXJB5zmRcG+27OEIe7pcgqXTz+ln8SLQjNfCS9fgwVp5Ot/Ropsq3IV59FW3drrEa/KNL7gY+QIu5HPPktnfzvcVozn5HJa7nbVYzLfMjjmruHPKP53OHxwYz+jNcWdCY7RnJTZryCJHEdYZ+PESgbB3eh7SDdkcDVum/8GG0hGeLy1CZC+F71nutp1WxP+G/JhryH8aw0XTPy7/lXAdLxXzOMkCkeQZ7qDKpMt9HVvw5vFDN+biD+qeM2a1pxPQbE3iR9fRjVPte47Z1dxfZA67k+ZXaAyUHfyDqyYc8wgnTm7EoaP/HSIJLhLMVhRMjmIkicAJNt217cr/4196LpSBqkX3hDs5Z8S/KxNJ8HwfdmAXTvBF0me+v498buLRnUIluzAywuLU91Dj38GzCM5HJiJnhM2uRshr2ekf5uwqddrHudTCemK/eRfbLayO+HWm0Yk2kW2z+I9GUrw22a82OsGkbXSbMgR3WS/Pc3ECCOqA9sNqaa3dv8beKaoItFCL5UDNlIt0af3ZmW0BPFcHzfHx7fxTc2q3Jo83fpLo5xI/l/VuB4Bq0Nd1DmfB+9gpowjmzRDNLp77Sy0pPFfLDYgw+Jlp91tfJzC9KDcdI1xF/P+OfMhk0jWX2H9FMXkvly4x3CO/X4pIb4Rn8NQfZvwlXM3+or9PUUTgsS77Wp+1w+CYMwzWWS1/nsC8bJIXWSYUZ/l51xq6UDjbB3li3+e2MXREHuJXbbFStbRrb7BN3jEkp119AtoB5jme9E+o3FLTA9w+bA9Dvbt90zzRo7OTzxKcIkHyFU+Sr3tyrI363yvE56hfCl13MMiAB6hzShhfYmycxaeEk6XmD3tRsuCXZdGn3DaWbwMyFGmsA+c9kXN8+0LJ5t3DdTfxfU3+mjEPJlxeF/NHapKAnyEXvtiRbnI1qyBClO81HufhJtDPdR7VmLnqTnmB5kenx8poXsf4PVzhJ/7Jp
qjcMYl0xYh/g8RDEFNcHHSL/UotDpONnrrSh2Oo2OPrXIVZ5FqnQ/woWZz9Uy3+Hat3zGG7+OhuN0v/NCc8Hn9/GoN4TVuKyLhm2s4rWgS8XLfb9MsdBc3P963GpZoOAr9h9EeqYxWb6ObPV+FLn8wu1fqOwdwjmEYQmTsFidt1o+wIaxVn5n8Vq9CdMw+/ol6cP19PlsWpuunozuZzA5t47Gfw95GsL0yj10HfI7nDYhRrIU0dLPYCzPbnD6OuCp02LyRSd7Lv1rmbRZ6B7mfja10XmMf2P0ltL6gBUZTfQd778ev1nWVRYnLn2eqzyFDPl+wt77ybY+Jj/tDNmUdXzfY0j8QwxOOocEmx2Ylv+YsM0D0n21NC8WW3wfw5JqaQ4PSF7vEia6jnEpz8g2P6N5nEeibD0yVDsIy/yCWMm3iLB5H4aoXKjXm6H+yB92VboP/iYvFwsyl1/ibhs/jkDkwjz4fJuK5mrblH90P/1le9letpftv6mx7s/3jYJgetGrWV+72fr+4mRrP22zIAHLrdosCJMEoS7dGthUN8nax8Haq170yhd95O+95UVgUsIfhixZ/y68iJV60ae/Y+3r0qz99Tprr37xf48XP3P4y2s0Wq/TjPpJ1NtQj8mCYD+MejZGCc2D+pSD1n7cfetPMIjniDU7T/OrpX4TrMOZ9O4/QMcXPYvTyqA2WXgZp/WyvWwv28v2sr1sL9t/fbMbpl8Yd6QMLC5F/ZU/5LPNSPRcg1zZcR6fxM56ksSf0d7vAoanXUe/uKvoHXUNfaJvonv4TQyIv4mR6dbXfWNuotC4C8mqVWjrfRa5ztuQpfsZxcZ9SFavRIx8ISKaz4RbahtIEqSPREHQ/TPnLpTLOoTsyH+QcKsSqoUBsH9NA/+8CSiyv48C5RW0dL6Otu63Ue11C6/lW/i+GDuDZXvZbB+M1YNgZzYsL4udWbIz2NGpdagwXeR7lDW+t3kuWonLKWSotyJevhyx9l/Bx9QPsjy1RSa1y/inzb2NemrQ1lwk3msH+btekKY6QtJDDX1lEXIk59FS/RidvOvRPaiBz3dcpnX/le+7tga+HwZ83d8akzo2zYLX8oBXc635SxPou/0iGzA0Dmjv9RB5jkeQptiGFPkGREkXQmMbBWmeAmKIKv1/et4StdRk20W3JWhnLtIed4RshgnSNEeI7fUQ+xlhVy45k6LYeaGtIzAqBrwWBjvzYo3tKbL9cXauxPKxDrxljSfr6NvAY3HZOQyrMzIoGhgSa41rrfaq5efI8eJy5Gh3oEfwHWTo50FjCPvxf3ruYpRjsv1Alxu+ezKRcLcK4ruekOZoINbo+XmymKX7WpAJymz7s4tbKBrQ0wx8N5BoUMbOCh4RDeqwsCswMLqJ1rmB7+193Rd4t7U1LmFEkgU9QupRaWIxIY8wLLEeVV63Oe8nEO/HKD9Gr4CH6OpZDze78g02guD1P8LrohAodjbMkwwzWlxWRyHqcised8jn3sYFYifDDTFD2/P376eJO18pEevQUiR+J15eSuvO4kFY7lmxnuXYHkWOZj9auZ1Bv9gzmF7yFK3dLpGcn0ULw2mec8vOf1q7X6LXR+i7u5GgWIEQcRq87PrBaNsKskoD7Ku0DZIkhx0Sk2KuTLBvR/rQn5rtf9m8fZsHO47wWuC9KqVBOc8PygV+CD5TCPUsP0hTiOczdXfEti5jRW+Hf7XHGif7OrFAfs3SQm5BpQb4tB0wo0UDCnVneawnswtsTVMVmxAtWY5k+RYex1rldQn5ugM8LqLM7Tw6Bz6k/ldkOexCknIVwsS3YWieAZsQAa474uC/LwfOX4bzPGxJtRMkwUrI1OI5UZC+IwrNff5oXvaCYKcK1/1hDGLzYJmbfLzpfZfPwxuCDuXBf2MWpGNd4boxAbqvwiDJ1TwUgxzeFguc9H/r915iV32S+GNdpvwICuS30NOnCYu7sTMbC9p51qLMlZ3L/0prTnre+yGfc6nhOLcHleab6BX2GN2C76O99z0MYvnhHleRqlqHCHEW9EIu3PsGoJrHkA5H1pMuKHrUB4kXK+H2QxzEce6Q5GsgC1XXiwHqjWKcZqW82vC9vMR5pdQo/1Gml+9TDTRddP0o4qlMsP03+/aCp6BSfuF/y+9INoJPFEAzMwC2FRoe/+UwPwDScudbokL0/Xt8Ey6+F5sibkW+6hxau9zB4EgLP1djZ7k8jirhAVoaLqJ/lLW2EVv7DPVObusYLVq7/8pjLwqdD6HUeAR5ur1IVq7h668VouDazxdtG0Yh72F3JN1vh46YjG6Yig6YiJy73eC+MRHyd7x4bLOstxEK4g/Dymget6waboK8nyvk/d1gH6++ZqeXjRD+Ira++QzDrIAT+bwGReyhVrDt7wTnpeFQzyGeb+lUJ2oVif+e3MTKvmqZLZ5FW2dgJNmwD2nO89qD5xyzs96fx7PY/zqMy2jCq2QbWZ2nQbF1aGu+SDQ4wuUjWfEjjxeLI72fQH2C/BuwM1tFcyMiV2Qj+1FXeG1Lg+aLYCQerkDLukFoicHohFeIN8Yg7nw5j+lj8Yekt6CYZOb1KxwXBUFB9loxxgO+WzOheyMQtmmqw0SDbJtOjt3Dz7ZACQYhna6vXRIG084UZDzqzOIALVKVrN2f0Rtxsm+6ZItn0FLzGKOJWisHA0ffBfZOB+aT/fuqj9XOM16YmG3h+eksdqt/zAPkaQ6h2Pkk8p2YjtjCY4cKnA5Z5V/yNhxkfoi/0Artaa0rHg9H6C7i0UXBMP2UhNw73Wj+k9EVU9CZ+lb1gxB5sgWcl0VA8qobbPs4E0ZxgawT2auWzoitrUDW866IPVYG7Rhf2IbJ4TkzFpq14bCZ7fag+Ty3h1l13RB9pBWa56q2/1m9mSB+PzRLPIUSx1sYHGPNs3mrZT020rrvfxPY9qr1jJHFZ0ym//UMfc7PFVk8Vwv9bzy+iMWQdAl4xOOlWAwM05dRsnlc/sM/SEU0qhD3oAppTzsh9EIJ4i63Qd6T7miLETzGvwj9aB0HoA29LmsYgqwrnRG4PQfa+UGQTnLjcQLagzEwnEnmMdw5lh4I2ZQHuxw1bMLFTTLBLoywzbmoa62hmxEIO6NsyJ+df6K4piZHvIAy3SN0C3xG83tGeu0hrzcwJr2WsE8TPxtm55bsHJvhPYYRWZ2yAYR92pqvkuwfpt9c4/FuLJ7eKv8z4G7TAqJJCcX3/rD73huSH8gubQ6B4Wgi/C7mw+e3HHifzYL3Beov5yD0dgtk1nclao0h3piEqqbRyLrWGUE/ZSNobx5iLpTD/dc0OJ9NQC56o/h2X2hH+NwjmbjA9Ib6Y39IExyOK4Xmyj87/zRhV2CG9EhTK+0DtPd8SDr+MdqartNa7kGU/XdIFNejyOUYhibeJSxYh/Y+V0kO6jlOGBRjQY3PI9IDh3jsR7bjTsIFp9HKcIn8n0/hLm0BuVQN5duecGA1SBb4Q1zjD9XhcE4HyVek977zg/hzIFT7wuByJgHmq5kIul+MmGcVPB+hHCPR6sEgqD72g+7zEOiXRUEkes
p2ByL5SQe0ahoMjw1J3N7LCNuRnUj+R3CDY0u/TnHqZQ0ZsoPIILFhZ6dpys2IkX6JaPEzHlcRI/sS8bLVPNcsTbkF2Zqt6OBzm/ylO2TvrvC4x0zHTdznSVP9zK/D6g34i0PhKPGBPJFokOcCeVsD2Fkvq40SfKAA9m+5w21dPIz7E2G+kkkyMJhkYBQyyEJEPGtBPJIJv2PpCNySAeUsXyje84Z0nBtkpU61kndNOyQ7/RHxsDXaYSxizpRB0skZUsEu/8/Mm+xsjXSc02Zdj0Qk2K1GnvIMCrUnES9dw/P+i3SneSxhvPxrkuX5iBEXWfMAxA8QSe9T5NtonS9z/JdHNMty3EpzXgp29h8hfsjiOaiNJgyUDWWyIzz3pUG/NZbb6d54E2UYCu23ofDenYbA/WS/f0pH1lcVSJiYD88uoXCODoGzSzSc5NFwliZDtzQUiqlmiGPcoejnXi/2MFwVfwqAy2+JKEZ/soET4bQwFPbhqkuiYJchCvZq8a/ODejPRszW5WneCditeN0T9hMc4R3aH5nNDqPE4S7aeTyhNb1Ja3sfvUMb0c2vHi115zmmS1P/TDjoHHJ023ksUoZ6O48LZXF/uZq9PL8gRlxItJmNQHEMvMROMIrZcCT4IWtuC3UrPVRf+EO/Mgq+K9LgPz0Rrp0C4FESDpNPEgxCOlyEfNKZpfAQOsBfGA4/uyHwlfSDp7QGLhvIPk41cZzAcL3zmii4H0iG/lgCQq+Xko0Zh8TLlZBPJz8nTA3RRnZPVMrPioEOu8UEzW7yd3bLexh/c5jpD8cFgZAON0DS35n01PvIFM6iSkY+XRL5eGTrPmnN4sCeYHTlJfRuexIDkp6hjdtVXm+nzPU86QPy8/UneIwjw3sM/zPZj1bNR5DiFQQJkxDc7BUE2oyDqVkFdEIYFALhPEGEPUF+G6EZJALJhGCCY7NIOEty4C6rgknSnnRmG5p/W3gJ3REgjKI2Ah7N2kDxQyCcV0VCOyeQ5MALjl8GQ7cqHIplAVAtCYTfniyk3axB6PEiKD/0gbRUB4mbHPa+ctiVOMKumxNkw1yhGmWGsr87HAaaoBvhD1PPbHgNaInEaSPQ+ZsZGLLuA3T4fjqylw1ByvwRyBo2B73jH6Gz7xOa+288PrIr2Qlm8/pFNvE6QSyetb2ZfF/zXvhMqIbLmDjoShLhEp8Bp6xEaDoFwXEoYdLxdM+Z3tDO84XzV0HQfx8Klx+C4LaR9NpKP2gXmqGdbYJmqju0ND7ntqFwzcmBPj4VgZeLkPKwA1zXxkH2lhkyxgtTPCCb4M5qzUAx14cwdTwiTpXCe38GtDw/Igxey5MQvqMIqefaI/tWV+Q86I7Me52RfKs9Yq63RsjTFATfzkTmmgkY9PpW9B10FDV5F1EVcRtdvBrQxRXoFdLEcwlYLHvnwCfoEviUxy2y/aHf8yNYLGm16R7y3fYhPpH0ZtgsBHm/As+0bjAPL4dpeiu4vVICj6nFMExIh350BoxDcuA2ogTeE2vg+VYVPGe1h3leJTyWF8KD/Bi3fUkwHo2Ay8YAGLvTOg+gOY+n+b5OemC0O2SdDdyHUBBNFR/4QEcYkOUt5t3vjkrLKG5HW4HW8XFHBB4vgHFDHBzXhkG1gWzPl2Y4zoiAf9Z0JLjtQKWEsL4nyxti8Ts05yALr4HVOwLczvN5RoPnI7A6RG1NN9Ap4C73g1gsKotj7h3eiArSCUXKE8iU7UCCZAWimy14kQsyG2HC24gUPqb+LXr/PkKE6fx1gvA9MoSdqNLeQ3v9faRIf0SYfBZC9e8jOOBN+IdPgCSP5KeG/PdRNO9eRkg7uYDpMDZnx8+DoPkyBJ47UhF/vS1Sb3VA6OkSOJHOkH3mDbvxBtiO0UM+yxuaz4MhLvWD5ls/uFako0i4jyKbRyiSPcOAENIBLUj2K6xxb2xebM5s7VlcMaMBi839fa5VnnfRI/Qp/5w1Viuqe/BzdPR/xPUEqw3GbEKUOI/7gyyXhOeWKFYSRlpL/1tMfsJb3GawvAyWX9Kd5Kmtxw2kyjcgUbISGdLNyFXsYDkZsK/Uwb61FopB7nD5MgIem5PgsioKjsuD4UDN8bMgiLNpvgtMsPvaE6pVwVZMlEr+/wIfmM5moLipH+LPVEB/jvROYRwSm5HtEy+hUFaHnl7A1+Tz/DDSGg/I9vvYnPpEMNzbiJ5hjTzmmNVjYzGlLH+jT4Q1Zp99zr7LaMW+z2LIWQ4KyzFOJ9sRIc7m+yIpNG9mK1j+GXvNsGK8fBnRhvCGbCHR5jsU64/xPAvmS5QajmJA/B3ov42C5w/JCN6dR2tbBM/9adCuCoN8gS/EN0gfVJOv0N1A9sGN2cin4uc+tzSHohs9jqdCM86H+5BBG7KRf4ywxckCKCeEIdBxLKIl85Eh34d8+XUUKx/w/F3mA7H4tumlVh7g+TeRjAYNVp1HOmBSjoXnULC4ctbYPiGjw+AXeQk9Q5t4/grbGykju8nmmabewPNXrHktu+izH3jcObOfDD8WOh1Gqmot4Y3PkKPdyf3rLO0mdAw9Qnh4IrfxCfcqoSWfR5xF2PF1E9d9sgrnRnGAq0V8zfxY7KhPFuWiWiYIznadnYZLvvGp87tdAK+VyZD3doWiC/kSk0MI0y1Gku2PCBffJf77FEnEZ2XOt/F2qXX+bL/v9zhilsfDcki6EW929H9Auu8BjxtmeRlvtrDWr2J+E6tTwOgwOo3pDAuXgy5EK5bfkE9+EcPU6aotvMYmW9801QaeZ1Lje4/osY/7UqyuWobDJu5L5Wr2E898iwTVMiTfbw+fMzkQF5JtG+nK8vcfkA68Ln7tB8V48zPbHPU05SCP7aJOsVYMc9ggVrqc1UzxfarfG2+RbydbuTUKiuXkf6w1IyR9HI/FT5D9SDy3mOhtnX978xPS9fUYmfIYX/ezWPd/08HzY1huEsvR4PafMF/PsPu8Pl/X4Hs874D5xCx+ls2d/YbJBovNZ/kpjG9YXiyr5VhCGIKds7QkHmCfMX5g9RwGxjYRjZ+iynwHOapj6BV6n/zNRqLXNiQov4aM/AbJB8TnvQ3XxEr9ILkgt+d7IEs8NsY8bIuI97IfygLVd6QJ1v1t5fs+UC8MhMNHAczuP5YMMHxqM0de6e5duTFBWEU8f4jW+wYq3e4i3/EE+bK/oqt/I/qG1XNcMyS+nsfDsz0Plr/C8iW6BT0lW/8U5e6XeU1O5v93D64lf9maG8LOAZieZPLP6jf2IF+Z4UPmI7CcU5a3xWxovtM+osFOTkdrLb0bRM86XvOr2OkMUmQHMKNFE49PzlTvRrhkLmRj3Z/K2ukXEYZVsj0gwWCb4dDH9HrI1vy7mY3dEHi7GO77U1hsN1QtjJC1duZYUVrtvEgU5HzPM0AYlBVr+1V9rpJwjNtT9A8G2ruRjaO+L61xOxPZ9LRGvv/Pzng+am/1+5kMMJvH9F/XoCc8N6BvhIXH5aboPsabpfc5//eNZHkndXxfnOkMZit/94vZOjN9yHADw45sn5g15i8zW8ryq1obryJZtpn0/0Us7
A5eDy+TeCVU+i6bz0Gx3GWZZrDXVo+PYh54/pAEr1/S4LE3iTCgL+w+84D0M696zZqw+8o3vSD2Mt4XjYp4opXWPkWaqzC6TAiXvfs0R3ESRepr6OT1DBWu18i2/IZqM6v/SLwf+pzHIzM9xviXxSWz8x+WD8E+Y/v+HfweWnOygh/xOgCsDng78ptZjiXTlYzn25P/0M7rFqq8bpIOeMj305ksszq2LF+IyT/zH9jaszqn7LykhGSj0u0BUmX7yJbe5zqI8R/LtQ6z/QTqOf5w+i4c5mPx0O8hG044UT7Q94myMnyfc2nWVEN2fmVAVqsQnSROIZnnFSZ2Miy3G2v4MOJki0fua8LhFlCFNJsdyJQfQInuHKo87iFZup3ns1ST3Fd7PuB7OgOinxLmecJrlrD9DlaLkvVMrzH71ptowObKcoiYbq8y3+JYYHC8VU6Y/u9DcsDmzfR+W1p/Nv9S/VmOnxnfsDw8lpvG+KDa8w7p38+5jLM9tFySqeFJ9VgxgO5dTPIvI/rYroL0W2fYv22EtkMu4pNmosx/A+KEL3fnCgdGthKu9IkSZn4Q4zrkRJxs9B612W2TfQ+2FxqBlMZ20H+WjFi7ZTyPKUXciFKX35BFvnyG/BeUG+5wvcfy11jNUVb/nsk/W0NWl4/tefH5p1tzTdkcGR0YDbqHWGtmstxNlgs5kMkFzZ3pQPZ5pfkG9xdYTX+mM1oZz/N8SpYTzfbJeD662w2S8Z28jmiqcgOyVHtQ43Uf1R4PkCk9hRzHY0gYNh2B0eNR5rMBA11ItnR0X/OBRoUQ2VEwChU2ec3nO75tvqKZ4Q1pmY7XxtAuCoFhXSzkyz3g7T8Qac23IUn8CemEpZLF9UgVd6Ot612UG8nvNT9E7zBrTjDDe2xuTM7ZXhhbd4Z1phRY15bxPtN1Y9OtuKBrYB3P72ax8cwfYPuDwxKZDnxO/vFlfkbE9hEYpmP6jeXGtXG/hgLtCRrLTmSrDiNLcYjnyKXKN9O4tvF5Z0jPoEJoQkXHvYh6vzd88zsjuG81QidVwHMS+Q7Twy3O7wZCxWrrzvdHwIk8eB3OgGwBvf8mCM7roiFfZYK+vAhJAuEs8WekyDciTb6N72+w3A1W17EPYV6WT8NqPTP7zebH8gDZGrN17R/dxPMsmB5guYxMLzBdxz6z4j+mE58Sv9Ry/cdkgf2O9UwGWI0Ktp/UQn+OX5PlW7G8w1QaC8u1jpUt4flBrKWK20k+DyJX/huKmhFmKNuGsHn9kE+0kH5hhOOmALidiYPhSCyc98dAvT4UIuF4h3XkR26JgccvKTz/WrcyHI7kV6qmBSNGshBpkt38unnqU9zWFel+Q4+g5+gfTnMhmz23ndXOzSV9z/Qu8+UYnu9FMs7wCzvzZ/vfTA+yXDA2b0aP99rgRX1gK0bqQzaE8U930v9MNqq973I5y1TtIvmq5XxSpD1L89+FQHEc3x9iss/yCpPFdVw2WZ5hZvMjiAn/Cupf/eFITBeumc33PqSzPSFZTvhnuS/fN1T+FAz9wXhE3WuN2Ott4H8sh++Hq2j9JfOcYY7vjnRhL11zJ887LGc1pnwaaCxPuZ8xh3Db8v7WPW7G46z2KcuN/T03j535sHVl82M0YPaA9cw3YvWdWFwE6xntmH5g8sHsfrfgen4WVuNzl+eVpas2Ywjphu6Bz5CtPkA+zwy4iSXwFDve85X1LwoSJwVFyuZGx4srSuNlK7rGC8t7On2Y9YVqbyCkg/WQjXGC+LE3lN8HQvdLDBx2RPB8RYfN4dBuiYRuM/lAP4RB/MQbsnfMkL7vfk/ewX17rGRxQ5bsBLLlJ1Gqu0L25SEq3clnC2rkZ3fjMu/xHK2fJ1hzb+a0s557vf/C/2O5ojU+D3neLlt7ZgsYj7D8OdZ/2tlaL5r97s3S32sGs3PDu3z/nOl7dm7KcmpqfG6ge0Aj+XxfwySphEGacVbrEBolOAvRf7lH17JatCvUTpIrjnoW+uzNgtjPFQ6vekOc6w050YD5sMzfEV8z3ZW96nFUOtHtZ8kY42eS0cbpssGuQ8RuhixhrSANF2bMSJT9YGH6JF95AR3MhHVM5Fu6kW023yUs94j08SXi23q+9iyfivk8i3ta4x9eK7DydrfgOo5P+kfVc1+f6Qj2fBF2NsRiQlitI5YP+UUPa/5tP9Id7PtM97N1T1Ks4TghS7WP+1k+0j5wk5cgSj71nklWdEYqs0Ge6vv9JZqrm9LsdiwrEPa9mi2s6SAf6zEz+HAB3++Uk88vTvbYSL7/J7K+xs5iZ0O0mK79w9r/etuUSX7iYNIvc7ney1EeQwun68QDN9EtoI5sO7PzTdy+dQ9+TBjkMOGMw/w5Byyu46MaqwwwHcDyoVvoL5C/18D5nPl27Xmu90OSBQuvmcx85RUDgWX9rH4AuzbL285idWTk3yNOXEo+xgKESKdBr8hcrpkbeEE3mjBasRPklQZoYiOva4XQdoooY6V0iO6TZu1l+9wXxiJgXw4/AxPbuNz6s/v53mKPBaHi64ghvZIorgLxAOn77chzOIl2HrU875XZNZaby/K4S50v0Tj3kZ5cyX00do79dpmFryfzZ8ZnWPj8e4XfxMc1Fs7nA2Ke89zYasIKjB+YLWC+0nLCLO/S60n0WZXpLt13J587q9HuLw6Bp6TDPDZGw+mE056nM6H8LhDKbwMhm+UOSbYaiq5GuKyJ4n5LzJU2rFZ5kzTRcaQoSP/Ucx58xD6dWS2KZNKjOfJTXO7L9DdI55HvQrL3Cqup15FhOWaz6tEzpIGwfQP3c0enPSM8Rv6YwwHC6Hd5jQbWGAafnEvrSTzNns/DdP675VY5Z/Rj9UVGJVtzJFnO51tEn/5k+7IVxxEvW4kQWotg8RWYxIpZfA9+UPMIzxOZjX5nc6FY4g/xUx++J6f40Jvv6WlXhcP/biH0X0agmbd95z+77oJEUIaJ714o0p5BiRPhO/lR8u/u8HzHgZHWHPmhhL3ZWe7UAvBzq+7EA0zWGd+uGGRdb4bTMgiDtfO6zGteLx9oPQNm/2ON6UVGAyY7HX3JF3S9jT7k37J4KaYPBxI+LHA4hxjpEpBe543W/o0X5w9OvusyTiY21CD6bjkcFwbBfpwRgecLEXe+DeTvecFzQwqCLxfDbpB+3z8w92Zhspnb8tQn0c0PhB3rUOJ4E939aW1IhkeRLXuXdNzbzE9NYnWuLZjX0cLrmzEZZ/qcxXb9XrOE+aYJsjWE6Z/yeibMRjAdz849Gf+wmtm9aM5t3e+hWHeZ9MZjbJpI9oHsY77DJcRKl8NH7EWt901q7fkYs8RAj+Xxv3qsTWQ8X+d1OQdhewphm6q67rwhembsnba1Tl+FQVpJfmuZ8ylJnubfxHv8UQsWJ49iPhSrr5DreBRFmvNo5cRw/TPy5wnDZ7G8ZQunAZsH86uY3mb6m/E+wzysBjzz45ndL3Y5jST5ar53NyCK2QELJmY1cJw4lWjwKvFB72ALzy3uRTLE6jkyvu/iw2oPbqH1HgZnMWaNThrGnzsm76zP9t+Sec99
ZTxsq7WLhYhm7oqfgub7HsyGskS/ldNnlq5QNt+bxWdBJkj+Zj7l32o6MbTMTxzSECcuQ57mICo9rqJQcxotnQlvmx7zsY0nDN+dbP7rhdaamgy3MBvPdDWLc2R665e3wGuQz2rLsPBThEs+hdGuHHluaziWYXXLmWywmv+sTltX3wZ0ocZ4q4vvc6L3XaRKf+Fnpu5iS1azjteAl3XWD3BfFQ/nT8JgU+zwLzUEbRZ41Pj/mgeXCYGX+PwHqIMMG2LhONEHEpV02J+Zu1YMrPISu1n3VpmuVy9B59CDqHFvRAvCO+29HvB6cf0j6vBmyf/LRR+aYOG1kNg+BstlZ9j24NvWfPD3CNeOjH9AurEWBa7r4SmvIZlp4rUMWawH28PLIt+1WHcWHbyeoKtPE+GLi+SnHiCstx4hdq8+1wohZTR/k8MoryWmDUk8LskmVjH0X8nsm85mwy8JDbqPgmHTvHmh7Ef/DtGXyqEZ5HVH+HdqsEpFWzdXMX8Z0y3piu3kS2xHlITVIXofPnZjkOLwLfl2teRz3SL9dIfL6iq6+9J+1vVnezpsL5OdYfFc/hxrzSKW18xiPSYS7htH/lC56TjCJB/zZyKwfRtW26RQd5zXCUmj+3b0u4lMzU9Ile3iuD1WWAwXl8wryjHOi/Wfhj9nZ9sOb/s12CQqqv6NzjIIto5bw694HEiBPFV3yGl5+Pr0h52h7OV+6u/b926+5DtcZmcJ6YrdJPOEbxz2Eaa/yfcP4hVLeM422zcrd73Ka1v0CL1LftoF/DTCgg3jrbna7PkKnfyeYhjxwnCyZazmwg7yA1j9qY9JY9W4Ek/rrpMtf87tO8uRztccQ7pyB3IdjvBWSHghTv4lYmSLEGk3B25pLaFbGgDX9XHkf4c2yYa5rm0eJP5h/VD5usCV/jcLYJgQBPMrUSjFYKgGetQKL55X+LdapPjxRIZp81S/clzH9m1bu53hdfwLHM6D2T8We5yvPcR9zalF99Ej4ggChNXoE/iQMP1tjE27iyl5tdwnZ8+WYnvO3UPukC28jU863CWsex0V7hfRM+g2ZpTex2t5D0jur9J1t3Asn6HcTn77AbIRP5D/+iXC7N6Eq7EQyq/c4bgiCNIxbrBrrV3y78mvZJHXOOP+BBg+jYB7/2Ck36D1H2mCja2N/5/Vfy/by/ayvWwv28v2sv2fbv/Jv82C0Owv+xdVBSz21veNKmvPCyhQfwFWF2YKL38Ei8RamKFRbe0TTNa+2QJrb/N7n44XBRmsvT698UVf+6Igw+Y/rHHwN//crF2zF3UhlC/6tC0XeG+xsfa1g6zXfTFMQfL77SZbe+nfvLikznp9dZ2aD1NZB/67yAfglx0ssZalqLX2zS6oWX0IQXJ0AZ+der219xi3gP88pWkTLzuR3LSJX0bdmH7B2pv+s6vH6kR0psaH8bJOxMv2sr1sL9vL9rK9bC/by/ayvWwv28v2sr1s/8cbK84pGWW8HrGrCLE7W8BtSQy0n4ZA8aknbBbZIypkAbJtD/M85FztbmRptyBbt4nXqsrW7kSl7xn0iz+LwSm/oXPYaXQIPoO+cefQPfI8ekZfQC9qvWPPY2gae30enUKs/6sOPI0UB1bb6nOUeuxBa68jyHH+GWlauq7zTygwbkGuywbEKhYiRvE5wsV32Hk4fM0DIeuhh2hUNIiCnd8/m37/afp3cPjQvCHZUtLUH36HsuG4KhTyhd5QzPBFQMmryFdeR4H8CgrVl1CivYQywzWUOp3jsc4dfWt5HMb0F/EILI6KxRKx1+wsnsWP9ou0xtmyWDMWc8TisVkMKqs/0SngFgqcjvJcIRaby3LPczR7ee2dloYzKHE5iVTlOh53lyD/DlGyTxAueReO7SMhTVBaREHa5Z9Nv/9okxHbN2vnsNj1+1hUYARCThexnA9rnkuVBqregciyP4oCCdFIcQ2Fqqtorb+DavMjdPN/jmHxDTx2ncUAsWdMsRjmMenW/LUvX9Q7YbFNjNbDEqzxUCxOhq0Lq3XFaj+xZywOiWtEhekKrcU9/hyecvdLPJep0OkYzwlKV7Fc+GU8x4/lzIc1fxO64GhIKx0hquVz/tl0/I/R3t7Dpp1mh+vqOLTEIASeLIBkntlanyTIAWIHA+ynOsCc1x35NjdQKL+Dlo6P0MWnEVWmRzyfnsXesfgzVkeHxZyx2CsWU8piqlmcGXsmBIvDY7G4LNaW5R1NLyb6p1tjVNjvee2NfOt1+kdaa/L0DQfaed5Hke4kMlW7kaHciRT5eiSI3yGS+N9NUgTRSaQxOkP0Ub/2z6blP9rEYHU72y66Wx4bk1CJ0Qgi2tvPMvGcTzGSeKpSz+OEZX31kFVqkKRej2JpPdobLBhGumREvJVev+fzsPh9xsss7o3FM7I4MBb7tprov3Ei8ONoa/xzJ7/nGBrfxPUQ+//ELGtMNIvz7hfBcuXAYz/7hluf21nqcg7pym1c76QqNqPE+QxydDtgsh0IR2kgZCnqG2Kco+mfTc8/2+SC0FzM0c20H26A2+YEzvdhZ0pgP5do355on6BhzyLmcatidyPEAifYpgjDUsUdPxaJpJtdgF7ewEeV1rir117oExZzymLN2LOhWXwt0y3j0oA1w4FfZgCbJ1tzEXqGNPJnlDGeZzmm3D7EWGvudfZr4r9j69cnvJHnI7J8PPbsOJZzHSlZhG4hlzCOyVv8c2S6LIHK1vedfzZN/zTPC4JCbO2yRjLZDcat8Shs6M1rh9jPIdp3MUBM0lppP5Jo35Hep+t+E0MdMthvU2Xb5haId1HhCNTQGrxF9Ns4wVqzkD1jjcV/T85p5M8A4s8aTAePhWYx3Ow5JOyZ1190s64Vs8O9wsDjxJg+6uz/jD/HrpN/Lc8RnZjdRPbagkqet7uV53Sz/PZQyfvI1a/EwEC6Bv0+X7sLjs3CvrMVBId/Nm3/XmN/YoC6jGh7WjrZHepvg5F2rwNCTxZB8iHRvivRPJloX0n8P9AVYhW9T9N+Jark/1JXL1FcMyJfvIVWCrKNKtLTAaTX+1p5vmfYU6LFMeT/f+29d3yUdfY2PBiSzNxTMum99957h5CEkkCk9w7CAiKIihVBsZd1d+1dUbF3QZQmCqiAuAoIIkiVpiAgNbmec507Qd/dZ5/P7vo+7/v7Qz6f+zM3M8lk5ny/33Ou064TsFG5hCbn/oQxaccEv2xSTr1ras5getEpPCu66YNrWI/bqnM3B8UewvCkn7SfkHP62DvYHLpN+UfIT8ceQ/bLc540593lGvcj3TZf8M8zyPd4G/GOKTC6+sGW73PICHY8YVhsgwxL5zyDFPz/A+Susk/2HijyXW9cHQXb/CjYn0pUuad/1R3GQ4JzhovMi0Xn9A3m3EEYfYI3GqX+/f/xffKMx0q6GTtU/s0G0E923A2iB96ZafY59Qn7Xvuq2Q9XLhdnjynHoXOp6I91KLa9g+bIFaJXDmBI/A86w1H57vzXa48+eWDYw8o5xpwlzR4/9i/W+a5RDjRyH2UZdyLZmIVQoxq+F8XAyPSF7Q6Tu8M2ub0
/uVLWI871g2EzXjYs3jMEm/Y0LrIF/3+83y/qXOAYahsXui7quUKErSiG7T72UMUg/tMa5GxthuOJRFhHyz7PFltbH8h+lI3yONQwKwb+6T3lO3tXG5/vIq9oH9n/XIehgcCDgi3fuoz48jR6BG5Trlz2RjcEbDC5AXyW6zpwVmm21+Mo9H4HXXzW6F5XnrFA8muu1b5xcnA2idzJOzk2/Rc9T1wD9lOXOd5UnqZU42qEGFXw7mSB98wQhK0qRtKnXRD2TqHy0DnvT1D+GGu/QNgyBcP52WF42Y4ZnrbXZS26/9+WvUetq6dxWcT6oCezkLOpF7qeG4uAJzPgdW24zsBN/6oR/q9kwjpJ9HuGmzp/r9jYP/07vKHFxmsvdrVvQoNDsLn7Z/QT7TQtDfiz2N6HhrDH8axgxsNa08xeJfYqN/ivk72+AS1yPvqEfac+Fft0+0Z+r77AJdnkANqlPe2cA3xx5HatiTc5MY7J72w3+U9F/+QZj+o8tnCvRhguB2I+Lkf92Qno2zYDNUdGKOdh7pZmJH/eFUFv5OjMXNuMcNh6BcBI9YHh74DhsK8yLrJeImejUtajyrAaVVZPa5XV4lFls1xUIc8PNMKdY21d/K73m5P4RPT83DX/jtxtPQNqjZkRi8mXFbeqComra5GxuTuCHs6E1/BA5dOJXlGOgNeyYb1SdHyh3y75TLOMASH/Vh8Sr1zjwdfLjQ9QaWNP6xfiE+/CoNBzmCM4Z9Es8bcuYb/8ee2Hvjhin/bLkSeB16jkk8ofQq4Ingf2yU8rPKX9mH3CdsrvHFafbVTyMe0lZn07+3fZX9ocslVtsCn/6xHsUQlXjC/6nJ2GCbgFw3Ed6k+PR4OsxWDl2bsefU5fiuLv++l3dj8lOHWu6N+xsud6yFmvFFtXKjq3LkB1rmN6JOyjwkzdFeuEEeOCTX7O7/5URDycB6OLLNz/QS7ek0LyXM+k6GzM7O+akPJpHUIXFcJ+Y4zJhSp4xv1EivJbKddRlf8Ozj3/T85VJ8PiUWS8tKXG+AKc+djTd7f2vk7JbMO9ov9fY6/eJLMPhH7AjJITaArZrtwe7Pe7sp3rmxwd7GGs8flIdPvnanfJU9Iz+CuR9VHFPDwj3YM3oHf4NjQErhP9swJlzjfV/pIDLsAjG0aEDZXbB2NU2xz0w0yUHhqE0qODlJtpmKzHONyESbgNg85fieqDwxG7sgLuJ1Nhvz0WxnVRMMhpM12uG6Ph90IGoldVIExk5vtoKhyz5PVBwWIbQ2DtK+sV6PjcsHjm/CvZeN0U8bLPB5nI3dcHXY+NQfHu/nDfnQSvnv6qC12PJcNxj5xF8aeMWv9TRmfbv3yvf3X5yBeuMFb80M3YhYsDfsGIaJGnYPYba8UH6Gb2yrHHgH2WxESLr2Jf6Vnl2p2Sf079Yfpa7K3nuaAN5nxr9pJT/5MbhjajPmAduorOLzQWoth4tT3m86py9lH+7MFyWIIQ0D0MGd/2QNHOvija3hdxKyuRsq4OzWemYhTmokXWpGfrFOWRHYd5aDk7HQXb+yBqSan2qNnviiP/iLkO10TB/9F0RH9YhogPShDwchacDyaqznBeJ3ZTMIq1zPcXm8OY+4/YqvNlwZOcr6Sg+OBA5T6dgjtRuKUvOk8JgvPxJCSs76L8eLarI7jvuZb/lQ1KMKbEVBqrzna3H0K/wPMYKz7YzXXQmY3PCa58/VKR+WzT/2Wf52e3m7Ef+lYdvjE5HCaKDppW0Kb9/pNyT8m+36J2gpxF1a5lKLQtRInIvdzxLsqdb8u1SGzvW8p/S86rOGM0PMVUZT9Si6Gyz7v/OBFpG+rhL3vYdlc0gh7PQun6/mj6eQqaxLdskFMwCFeLnpqvfH5dfxqNzK96iL0u0Dl7xjzB35dHwDZNsJ9gRNc9CXDdJ7K/Iw72udGwTwpH1Iel8HsiDdZRsg5Zru9tFu9RamunBVb4vpiOulPj0R9XofHsJOWhDHguC50fiESOnIemc1Pg/2KmcucZTmP6f2vX04258VXGJ22NjgPo7XsCo+NbcUuDyZ++RvyrDeLjfnar/H+q2VPPM8AeYs4UZfyN/VhXVrZhVMpp5V4ze8vPap8y9Q25RNjrRrzJnh/Orle+JP9PVf+wNyjXeABJtmkwOrkR914p6jFZ9cyIc9ejZs9w7bMkF5rj0USEvJ6Hyh1DRP9cpeeB3Mxj5JEcrAPbrkDtoRG6boGv5sD5N8FMNwhmmhJm8m3JZZCHbnio9jN3Oz0OXY6ORurn3eD7YAq8RSd5hxhrPdMchxMWVelZi91WC8eqdBhLU+F9fwxcKzPRF7PQ5dBo2G6TNU73Wfx7cFW2cVdNlbG6rZv9OzSK3R0YflLjC/Rn2fP40Rxg/W0mvy9jbpy/TJnTv2VMjnZhdlWb2lVy/U0vPqtxnwEx+0Xmn+nMePIHsLeO/hdtAuXfEr5TsRM5UGl/04158LEkI+fOKvSQ/V1+fhha2mZitMi29/lLUbSrL1LXd0PWlz1Q+G0L6o+PE7nfqHInL3B/XIEhcg4G4kq0tE5H3ZExyPp7D4QvKlL7QP4v6yzBTFyLMSL/piDEfluL4uO0LdPRdH4Ksr7qCdftCfAuEEwV74Lj7jhYH46FTXxb2+yIj73/En0s9MtS1UcpX9bDa0wQDKvtn/rV/pNLsE+l4P+2Bvte9HQfxKiE8xonI2fY8MQTmFnyi/ZKsxeNfYcb7zbvGXNjjIj9eYxJTMk/r3xTvNiXylmo/aJMDi5y74zPOKWcNOSPIscAsX+t4CXKn/NH2Hsd2rkeAREBqNk/EvGHuyH4u1LE7emKtH09kbKjEenf9UD18RHo2fYn9JKrqW0qmtumiS66BDXnRqPizHB0PT9W5cn1GI5rMejslag/Mg45m5sQubgYPk+mwCDvxNQw2D9Kg21NGqK+q0bFL8OU57vuxFjELa8wee9j7MRSnxnDQnO9xffyvjHix5h1VbL2E+BekAprtd9Zw9LpP8I7/3hlGXcn1BjrNAatvK7RJzFefCTG/8elcQ70j7gki/Ilf9tZ7f1lLI46iP2wPAuUP+eAcN8zB0MbQL5j6iP2ApLPifiUvgB50cjlQ75D+m6M/bPnn/wmUUYzbBYbjCmhcHyUrjxm1reTYH0vCfYP0uD+JNvkg97dgLTdPZD6fSOSdtQjZXcjUvZ1R/K+Rr1yfmpB5ZkRaJZzRDtNLDtWbPWQ1tnocfQSZG3qichnC5C1owl5B/oi5Jsy+HyRg8jtVSj5ZbBya9fsGwGH4EzPdPs34jtcIT7V68Q8oUsKlVPWOlT2fqjjid8je15RRl//KmPNUcaAevn8iDGJrRgULb5W5EEMT/gZQ+KOyPWj+FubUG5frvGDcek/YnrRaY1RMwdA7kXa4A6uHcbk+EguujGpZ9o5xNZqvIEcWvSjmfsi/qfPlm9/HInGFEQY3eG0+MOa54J1QbzyA3Emtu3lRNjelHX4IAWuT7PgXpsN54oM2N8X3LlIcOWSNPisyoLf53kI+boUEVsrEb2zBmmHe6HgZH9UnhuBxrZJGC
C6aZzYa9qM5k2TEPZaPvLWN6suz9jXCz7rc+DamIO8Y/1U7118/DKEvJAL7xFBsI0OUWzF82ObIZgn0rmJcc/fK39eZd7vL6s1vkZ35wEMiTqh85fZ7zoy8Rf1pyqc7yvnYqFtAfKtzwqOeQ8VjmXoLnLkrOARyYcVY/aJ+BaXl59Wnpz5YsPHpp7FCNFhg2OZh9yo2IeYh3a4wX8DmoK/Qf+Ig/Jei5Fmm4NI2f8+nSLgzPRF4Ls58H0hHeGvFgreS4brtTQ412Qi4JsiBG8shvFykq6P8UIijDdTYF0smOfjNPhuzEP49grE7a1D4oEGpBzugcyjzcg42oTcExej4vwI1Lddgm5bRisXqvOxJPg9nY6w1wvgtyQbtpWiV1anInpPrXLC0u9jbMf5VDKcDyXqGhg1/m2GpXPl/xuyt3haAlJCZ6+tsn6MGmMDOF+CfhR50hsD/i57fjHybU8p7yJxIrE6+4jJjVoma1JhX4nuAZvl+krk+CEag9ZjfNpxDI46joFRh1TfDE34UTlG6/zXgL3/5NesJhaiHXB9LL7AW8ptGWeMQ6BV/FK3FY5s8WPjxAam+sJWIfezIhC/sRbVZ8coJvcRv9R2TwzClxQjY1tPJHzTFcFflSDtYE/Uib9Mjn5ysleKtSwRhFog0swRS53R1ge58pi3o1l5yl0PCCb9a4L60caVsq/vim2zvZsMr4+SEP59pdoW2pPKA0MVryqfT4nfedn7vyv2J/bE194tYLzjjtDvEzMvVb47cj/V+Xyp8i8x3kCp8Q4qHSuUk4pcnNTXhfZnZB0e1rUgD2uBsUCfK3G+gCKHyctabluBnv47taeceoZr2Tdqp+ihzTrvp8j+gvK40vdi3jdH9H+mcbvY4JsRI3DC4emGo28Q3HMTEHB/OvyfSIfvgnSU7xwk0rhVMQ/nflhvjVJcGvR6DoI/zEfUpkp0w0TR97eKv3QXBv8yW/T4cCS/X4nweakIGhsHvz4R8KuS3ysVLPlgknJfGreITG8Q+Y8IoV753JgffSNtj7E2HTF7u6C+daLaj24/jTP1T48A1gZM+a/2u9hr+4CQe+2XRhywzQ+Hz1XiX3s/iEqryN6+FX1Df9RcIOVOjo4Gv03KJ8FYTmPARtVD9FvJMV9oXyB793XF8Jm225VLvkDWosKxRPnpLw7fo3F/2luuaa17pcY7lWfY/qhya5mx58vU/yIPEh9dHhFwFfkhZXM90vb0QOiyIkStKkPjCcrhJsV/1T+NUE7x6NUVqDo6XGdtpD1XhbT5xUgZVYCoihT4xvjD5e8vuDYOQZZyhFrqEWKpRoSlSa5mBIj/5PyzyJ/+8mzzsnUPOGVrCdpnW5AAxycZCNpSjBzRW0MFS42SdSd3uZU+b5zrhGHx/Ld9Xvnnae3qO9X31sRDuubXRcH7Oj+El/VBueV91T3dnT8I/j+FfuGH0Cdkr3L2D4g6LDr8JEYmn8TguMMa26fu4Kx48lWRn5U5l8agdSg2XtK4QpXIumfgZn2e8dCm0C2K+csd7+ie5/4n5qTsGfehzCOMngg0chFgZMNl52wCDzgy/eB6NgXOjzIR9GUx4rZ1Qfbu3khfXo+sJ2sRfnUqwicnIra3nIHQBLgtsXJlIEBl3Ygwj0ZEefZFgo38MpPUxnONk4xL5W+OQsSyYuV8tk0Vv2B6OPwXiLxfFNv+lwT4vJqGoC+KEL6tAkkHGnXuyETBUXWHxsBvYYb6dNSPRifrYsPb+KvhMGYZdmO4cZGtTvBSNzkfvOrEN+5ty3PfKH7fNyELchAoZ1XjVNPC4X25L5ITZ6Ha41N0MTYpr/LI2HOas51ZbM4PYb6dNUDzBFdenmfmx8YlnNM5QqzrYXyN+SzG+InpmdPi+vQM3qzxIL7WHLZN183E+s8g23Yvsq13y5m5RePOnD2RKvgz1hiOIEP8JSMSdrtT1qAzrE5v2IrcsJX6wprihHewTZ53CE51w2UJh59F1sZShWjLYCR7zUCa97VI8Z6FZOtMpNquVr8ui7VdotvIdZNkTEO0MRAhRiWClxXA9XSycu0zTuFPO7ykCOR2Ic6P29IFQZ8XIuzLMhQc6Ie+rTNxcetl4qf1gI9gAuaGDOYirAYMw6DuglHgC6PaX/PrnIVjTAyH87Y4jQX6v5IF4w5zLoTtavE/ZsQg1/oAaiyfo96yF/28RN4i42cEOy4Q//d58W+fYz5+0FncM/gAZgz4GuPHfIxLR36t68N4NPMrxJCM55OnuSVip+a7aHeHJfwka7Af/SJ3KbcvY6LUQYWuZ1RfpV80H+mWucjoNA+ZHrci3XMukj1nIKbzYIR2qoZfp0TRHaGwW5wiby+9rJ28YLf5we2SPRqQAL/gLAQEFyEgqBiB/hUIdXdDmL0HQryrEeJRjeBO5aJzKhHXaRRSLroK6Z3nIbXzNYj2GgCfdzPgfDkF9gcS4LwzXudBMD7n+2IGgt/NU+5s8qJzbkLgOzlI/7IB5fsGofbISGRs7K4zmmxXiS6qDxBfQGRvkzUIl8cKf9j6BWndg3VEsGJX6+AgeLb4wbPJT2csdG5xw7slFD7F+XCV5SKwS3dkNl+HIVOewuR7H8JVTzyNyS/8Db3emIrSZX2Q9UkD0j9uRMbCoWga8yqmZkJ1En1bxtiYCxiZclz5NMmlOa6dV5jzF1j7Rt5N5h8vDpJzkrAUkX+rg/99qfAbnoOAGpFdXilCkrsiPEb0UEIp3HmpcNXGwtUkMhkQCffkaLivjIPvHJH5HUkIekD0w2NyiQxCxDaHLcxEyJPyfn+Oha/YU59rI+GSM24fFQyjORD2smC40mIRGCnrEdYDgUFlsC5KQtzfu6Dm4AiEv1ukeR274Bv7HbEw5H2M+wVr3hurPI2OhxM1vhq5qAQF21pQsnsA4tfUIIB7+s9xsM4Ig/egQHj19te8jPXyMFnTOAQ8nI6oF4uQtLgGqR90RebKBuSs6oG81U0o/qovyvYIJj7YH8U/9kTB+Sq574emHZei75brMHTZX9H//pfQ87o3MWDARgwu3oOh4ccxJrJNub/oXxFXEl+St3w0Oa/Tz+icC/KX0iebkt+m3M6sSyTPKX+2hTx6ReJPzLoJ8X/th7AreyB8QAsC+1bBOS1JcEkcAgWHh27MQvB60S/rsxC9owRR35Yg7KsChPw9D5FbShD5Nf+fj8hthYjfW46E/RWI2JGP6N2FiNiZJz5AKWJ3lCH+2wokfF2J2DXyHktyEcFZGncnik7pjy6t41CydyD8XslUvkiV/c3R5IyEcW2UWa9za4zG8oiXfGWNI94TzPt32a+bBfd+XovQpYUIWpyHqI/KkPpVA4q+74+6o+NEX11+ISbVD7M0vsFYavWJkSg9MhTZ+5qQvF8+1658xG+oR+mfb8MVzYdwV0/gGpHX1BhgmE8rhvudx+hI2cui90ennNI4G2VPfmHKlryTY+Q5Ps84G/mkOWuB/jA5eclFzXVgvShnEwwW29HTR2yyZR2qPJejyms5Ki5ahILOTyHb/mekxVyPhC4TENtvDKJ7DUNk94GIa
ByEkMZGBHUVnF/TgMDyWoRk90RoaW9EVoxEbM0URPUch6iR4xA79k+InTgZkbOGIfTWngh+WH7nFdH3S4sRul70+eZ8BInfHHWl+MyXx4M1U5q/IgadF20+TglXfjP6HZzDyjyL66+J6rOFvCny+qQaWSL/8v1D0HRiisbAic14MS5ed2Y88sVmxHxZg6A1hXB/ZPrs6ls8H4fOD4rNuT4RIYMHI6XiblT5ih0VpDQlTuSe1YYR8eK7xp/DqKTTWldFHmbmGSl3crZzP5PHelo7zzXlSj5vnXURd6A9P3ZKOR+ZHyMXJh8ZgyPnK3P0rI3gPAPaZc6BU9/CJj6e54PIttyHfMujyLH8BZmW2+T+EXnuHrEXc5BluUPuxaZabpbXbkWB5XHBcO+h0rIUjZ03Y7Bb/D/3UfSybkWZ5W3kejyEXMfDyA7inJVbkJI/FwnFl8I7xcH8CwzWSJE39MZo85oUbs59lecUC8m+d4mvxlk5xEjkKSvfPQh9z83AaNyoMabupyab8t5UDd/l2fB+MR6ef46E160R8LpNHu8WvMlY6hvitz8VD+e8IGTPnIEell/QbGlDo+ch9HadxZAwkVE2dEbupXlmrdvlpab8dHZMrsk73SFP1uiSS5czJ6a0c4qTr4z5Rubrp+Sd1frQy9trSTmPhOeGNoH2oH/0XvXLGBviGrBevcPPZl0EfYoc4z6tU2GtFh+LxX8oERzL14hrso175bXXFHsxz0COLH4Onk2ucbHxMoptgnu95Hc7L0R5J/EjOz0NY2yYOV92drvsuecHhsAmNpPnwC2+FvMNgS9mqe4PkMeoD0XHbKxH/u6Lkbu7BXFf1SJgdb7GpzyfiIHXPZGwXhdh1r3NkLPz13j4i20il6K/+Ir2F5MQtbUKrvEBCO/ahLrOWzX2WW/sRQ/jBPr4tOF6kdf9A834Jjl+GefnPp7Zvg6UH3UKL+qUGaUmn33HbBeuD88BOZ1Zc05bwFgoHzvOAOcDUG/RXlNevQQbMS5kyngh8u2PyRr8TR4f1Vko9NsYt6Ccyx1v6zwMxkEo+2xZH+ZwiG3pE1a6FmvujXMYNA/htwaVziXmPBV5L+aF6kOWmbJnTVrPQNgaAmDtGaD1Iz73J2neIPL9EtU1rG3wfSkDfuTLXpwL92vpMJ5KgNeTIu+nY+H9TBxsz4q/tiAJxiMJ5tnpJphogeir97OR+FVX1BwbifJdg5Cwohox22sR9noa/AOLUOb1rvi934r894j8j6OPE7ha5PTKZJNnkrFM5hs7ZmnrnJ28X+fMjMsQ3SSynlZgvsa9zvoTXpcWtck+P6Uchnxtcp6przrm1FCXcY/SbgyQc0A/meegznet5uW5v/OMh9pjTW+q7Ln3Nf4kr1POfI4y5XP0w3nlGY+j0HhBZ71xFixr3onROKOl3LFYsMIBjM/bAZvoeetV4fCaKvK6LhIBT2ciZlm51h1Rv4csLoD75XS4nkvW+jbFQqx7+VOY5jYNznR5NB7GohS41+Yg5huR7WeVsM+S9013wfG84LSvi5F79OILudKirX1hrJPnHy5Egn0q8r2fQKV9pfKo93KcRB9XKwYFtuG+PuZcF3IVs96c+TDG9i9rn11A2REDUfbU51N13hPt8wk9L6xLZy0u9zvzxDw/rKvumIvEc8TfMW14m+In5uxZU0SZMV7aMSOP+55ng1etz8r2uUCv6PPUN7yqXEvaZwYtknVZoj52vu1JXSfOYONZ5Jkos7+P4Ul7cUWXnYhcWoL4tTU6B4z5ftY3ZG/thYiPSuH7diYczyWZ8r0nxszndwkw62gp+1mR5uO08DPG7TE/2Belnvf5KAsRWyrVd3NOkTXoHoCY+wtRuKwZxSv6oHr7MAQvyYL9ylQkeV2KbK+79IyXGK+hi0POpGMnOF+z2fc47uwFvDDRnCdOzl1yjjPn1TE/iXv/kpxWlSHxJnn4yafNXIGp59t03zMnw7rnjlp05e1v11cdM6s6ZjCxboL5BeKnlojvVNacT8wYB/ld6/3Wq09NrnLabMYzOJ+ItY2sB2NsqovvKq0Ho09Ie8LcJmN8zDdz/xfbX0VD0CoMTvtUZ3ZynivjSXW/CNb6tBLudzJhPJMIG/G/4E6Nd86JNmuKhoSY85smhH1vDA8daFwdFW3cFKW1tNZXEkOst0fdKna3NfSbMhQdHYDIZwp0hrl3jfhbsTbYevvCMS8BWY47UeGxVM7o83rGyf1dbLyOCmMZqm1fYFD4Lxq/J6cxuUDJ9Uo7wHwLMT3lNqkd89DH4hwvcvhThpx9xDroSwuJ9U9ibn0bHh5uzoTgOWBvBteBuJTXzHZ7wfegPR6edFRtM/1mngGtFRWMxBoK1hb1jfpeZ0N0Fx3O+BPj3fwd/jxjTPw59tdwlgbXkrqrwvXehXlKpQ6z5qXQ+bRg86s1T1l8ZCCCVxeqDufce8X8l0eaPsADcabMc92HROcvc7yawr290stiCTcslijDclG64bIPsiW4brEPD11rvJTU5r0kGXE/1CF9fy8ELsuD84UU+L+cicTNZUjpNh21lo2ocTBe9ppiDMZ+Od+B/P91jq0Ym3RWuYPJwUy+3VVzzTXoyDXqbJXsNsU5Q9XGitxEfzBXw/pE7nXOIhka/7POHn56HDmczfVjDw1fp+z5yNwZ73mmuAb0I4hz+d6MadT7rdNcDeVHPcN4B2N6nGXOOY7MIfSV/U/sy5rriyN3aI31MPHzBsce0rnode4Nyh0/POmg6jDqpyq/N5C1rxnRm6vhXJIO68NiQ68RmU8OJx49ZUwM+0b2/znmdxyL02AfHvazxWIZKnZ4iLNfWJvhtms9HWMN3OP2a6LA+FrynkYYq9Jg+zAFxrspsL9s2g7rS5EIvKSqrcjyPCqNj5V/t9j2pvKOE58Vib0iF3pPv10622lkoshWdMmMkpN4bGSb1qNwrm/HnDdTRqcUu9C+Ue7ck8R7QxMOqI2jvr04Yo/87BldD/pjHTliriNtgdmrZ+o1nqdhiUfVh6M8NVfsuwG9Q7fr32Dussa9QmN+9C9oJzgrijXZY9NOaE0GbQuxwLD4Y+gdvBPdfDaiu98O3N3HrJdh3wf1VLFrAZwfCo55PVlrmLVmbmTofqN/8PVGmX8s65XtjybEe7+RuCFwWzHqD44XXykeHpaLtlp9bGe1tplrJThVZ2c9nwY/2eMBr2absQvOXJwRccSYG/W29fbQ+a7mxKczvOa3VXgvM2eXGCIrX9lHQbvQIL5XtWO1nIm1uDhsv8i/VXyv0+gfyXry7YpP7utnzo8griEOpf7hHuW+Z2yHtYjU2Tz7vUX/Mt7JvPvguP3685ddmDvYdmFuE/c8bS/9OK4p8Sj9AcbqqIu4xzmPxdzfregXvQvV7qVaQ8Tadv6tppCteib6iV7iPJ8xqceV635g1BH9XlXGWoxIOIbXppv1NAOi96DE9jby7A+pH6T6fUJYq9ESfLMR7XT9Q8ze2nlu2JqE77tpzQ99rJgXiuF3RQK8q3xhLXDDXuIP+0yd+WDWGgk+MkaFHjKq/aca
Fg/lBi6zjPfMsNz0Zan1XZ3t0uDagQGhpzA5VfZckuyVGPFHI48odvmTyHVMsvgANWZf0fQizrdpxaPDTRvAfiTu2WntNnik/A7nfVF+jIdypk+HXqJcuonNfHzkecWx7FtlnQrnfnXgVtperiX9N8aOWJ/C3+utMesv1c4Sv3OWHuckUebU8eb87JUa8za5xpnjPKXzmDg7jtzTNU6x2+6/Y1L2Wbw9y8TRXFvaumz7PToPVPb8N0aWb+pvZJ5wkZ/nGM869/3Oy6P3Z33WQ21E1qE+CNlSqnnl2K9rEXh/OsKeyIU1323W/tJ/nqT+21NGuDP0t+uYZEz7sNB4TmS/ET3d+9BfZH+pfN/xiUCL/ymMipf9STwoWHBwzFH0Cz+CyTlnVN/rbJVyMwdA3n3OU6AOIsaZVmD6mFwH2t5RKT+r7aV/wD3IHse4zrJvohZgXrczmkugf0B7Oz7znM7p49w50w84L+sh5y7pmMq+o9egVvwB6hvOHWTMgn0z3P/0mamPyh2L9DnONx4c8xOGx3P+7wF09fkMJdb3MTT2R+2hZf8Uvw/tN/Vtltg8oyVora3Ad753mvOGzoWOBV69/Tf5XhHXGvFoHlJX1yFvTwsyv+uF4JUFsL2WBOvCBFhfSNBZ4MFritSv1R6jS0V3dQ/cYM/0vcCf7WexdO5ksaRGOHotoD9fbaxBo88uNPsdwpCoXzCjoBUtQQfRzbEDfUOPYIz4iqNE5w+L+0XrfsaJLuC+pJ6gfaTM6YNR/tp/3e4Pd6wB663GiA6mLu+Yd894QJHjWZHj38U+/HRhNix95sntmJN2kjFR2pERin1+UfzJHIKJf1bL/Vb9W+MzT2kOgb0faosDv0JXeZ02tUL8W87IGhXfqrPhuji/QLltlWDdkzqXgnMMOKPvkuwTystNf9mYHPaLfXYU/O5NQezrZcj+qheKDg9AwuauCFyeC/srifB8KBLed0a02m6L/N52W9R645bodwX3v+41O+xu7+mhD4u/u92YHbXbbrJPevnNT5obcXP2JutA3x2OzNBz6V436pyuRp+derHGfHDUCTQH7kG5Vfwu5xb0CfpB9P4P6B9xCJdkCk6sbp+n0sWUF+VPvc9+pDvbL/pWxKKUC/2wsTpb+aw5gzTHnCHJfgDy8HP2Jn+mI25EH5jrxhwB7a0ZB9rTjhsPav6Ac+2p21nDyHjBZSVtque4z5mHZt8250BSz9W6VyHX9ojG7/pF7NO5jOR/7x6wVfTnSZ0fwT5Z6sCxKcdRZlumdQKMZQa+kaPzWZO/6YaQNTnwXBCITjf5wGuS/1nbiNC1jl6RT7orUwYH+BcnOi2+of+Y1/W5Jtlt9At+1RgfttTrrsgf03f2RLdT42FfGIWYnNEov2gxauxrdZZbpfEJ+oYdwNjkc7I/1un/ewf+gGGxZ8TenhK887POuuL+Hasz3Y5qDHNawVnFLsSPt/Qy+3t5BojnGX/riCdwf09r92upW5h7ZPyF+Jyzby/s/Vzzd3R+Wl6brhvnX9He8neIdVgfqn1LsgbU2fQrWGM6gnN4grfpHu4V8rV83nO6xpwNRl+L/i9nxdJ3ox6bWXpOa7c5I3CO7Kturs0osy5FqfVt5WJwLIhH50f80GmeG87h+Ugqm4rCmHuRY70LRR5PnSzxeG5rjfH+hgbfVcsqvN58p97vhdUXJz75RXPY+2+nuUbf7V1g/9AYGwqvGyO075e1qCmH6+B/WwnyOj2q88pq7J+i1HhP53kNiz0u+2Kj/P339bNwpiDjzCMTRQbx3Hein+KO6iwx1m9yv7GfcZr4U/Sd2MtCX4ry57lQvFdoxhE6YqI6VzGnTXUJZ3BzLWibiXtoS6ivKM8r2+Oi/L+Zq/xB9T/lzZkAfQRjEtfSn6VfzZltl2Sd1/+zhpQ+7cDoA7IeW9UOVIq+4zxa+mXkE+kRuAXTOOc0W/aDYIx653co8/pIMMEmlIy9Ex5/c8E2NQpRFcNQEfUCBobsx5Xys3c3cJbpx+savX5Uu/yoBZ5hlpJMR0b0HFe3xB/901MR7lsG37hoWHv7a/yUNe+JK2u0D8y5JB5xaRPb9/5nOsOvSnQj52rVOteK7JfI858L1v8efYL3Cgbdq/NpGOtnfxbnEeoMX7EBxO60fcwtUt4606/exNJzG36N7fPivuZadMSYua+pY2hXGeecnGueI8YmiIGmF7epH0C9Qtlz39OO9A79VmvWmU9WPRRs9q6O0vzmCcX9nAPJPpuLQwUv+W1HnetLNLi3oIf/dvmuH2lMq9axQWS+A3X271Ft3Sy+/Zdo9jiC5pZPEbMuDUXJj6CX6La7Zb/cmCV6M1owXazYwoR1CLe0/IXUwVZX51usyfZltv4BJ9x3JCD0zTz43BMH74m+sF0RBvf9yQh4KQsRHxQj4PVseL8UjoCBFSiwPCn6ZaX4VItQYryp+qfasVLOwSKd/9EcsFf2/QkMEFvVP4L2lziE2KXNxOvtcU6dKya6Y1jCUV2XjrlqHXPniIO43/nIi/ubZ4QxHuIizYnJGaAOoq7nrFL6YIxPj5e17fCZhsu+p82lz8b4J30H1q/0j/pBseX4zNN6LvpG7EWfkF1an1Ql36nG8ZnOSWXfGmeE8rxzdiZnFFYbn6GLwdla3wjO+A6D/M5hSqzs7SfnobD/zSiLuxsZOVORUz0GiXIGwsq7IKibyLc5Eu5hYfC9NBbOq2LgvD0efs9nIGdbM6qODUf40mIYD8Zpr6nfaxkIfC8X/m9kw/5SPBw3J2o8ocz7Pf0MrGFj7y6xgfmZV+p8w9GJZ/RcDo8RDJ4F5Sahb8S9SZlQfsSE1Bn0j8amn9W5LtPa+15Y939vOxalDmEPkuqnOtM+sza9Y8at+mkiX9as0F8weWvadN7z2IxTuubUQcRBrJerao/1sIaIupCfi2tLW1DjXC17faN+tyLBkfnGE2adnmAM1nFXG5/qviPe62LfhHq74A7nfvTyFt1qbUXNy7cg66YrMNByHp63+8D9cjRiNhQg43vRHTu6IHVnV6TvakTClq4I+aQIPm+mw+e1NPgvzkHIyiIEry7SugnH00l6+b+RhZDF+fB5KQ22JyOQkDIV5Z2WaO0aP1OVfbXonM91/9c4BYP6bsWQ6GM6a4ozRWeJLDhzilwx/I7ENB24nzFNxpQ7ciY8+9QbnKX+F85lGmDiIupxngvqJ+om1j7/uX1mE9fGjFm3aZyU54FYaGo7/mEfJJ+jruMasaZI+wN8VguG2ah2YUqe6C9Zqwbfr1X+Zfb3NO/CnkniyCrjYzTY94msv1e5VxmrdWY2fU321dZ775T1/BLpr06Be08w0nPuQYNlt8rPtTAVgSvzEfKZ+LUrcuBemgXXova4xMJEGHLZ30iGz6IM+C/J0bmlsRuqEfBmNtijFPJevvb/Wl+MQPDIepRYzDhmldrd9ahzfo1G9zfy2TfLfvoWvQJ3CgaTvS+6epLIX/3c/mZsh/uZfRf00znHjBiUOp3y43mgTzA0/qickVbVP5ztxv6LjhgaZU3dRJxEzhr6Ch0
xH43bFf3q604rNO9pI0xbc1axUoPfF1rH29X9mc5kIl8H55w3+m2Wa4ue40zjTsQYQ7Vng74Na1UZUyk13lYemwpjuWI76p9a20bUdFqH6JvGwnkkFt4PhCC8ugV5QY+Zcc07Y2B9Ol7ztdYXE2AT/4pyt72SBEPk7l6eheivq1FyZCAKd7Futw+ytvVCwHs58H09E27RP/bH5XfvDkda+FxUdF6qcTTKv87xDZr9D4g/clwxziDZ9/QRyZ9xXY05A455Rs6v5lxR+onT8ts01rlqHvDoSFOmzC9yfzPGQ118SbZpR4lDiUcZk+B5oU8wv/2Ra0kdxbW4sT32Nrs9dj01/1fMxJgnzwPPF/PF1c5VWnPNxylid6gX2Y9Q7/d3wTQf6cwy1q9R/qxnk8fP5HFcpnHH0Dzj4aFFxovXyhr8rdh49ZVi4/WlRV4vrEj3ufk1n+VZZ3035MA2RzDL1AB4c04r627vjYX9ZdEli9LgfD9dL5/lmfD9JAdB64sQ822t1lQXHh+AzN1NiFhbpjMQ7S8mw/5CkuYhvW8Lhn+vEpR1ekd1IPd9tf0zPX89/Xajf/iPGk+bmNGms936Rx4RfX0eS2abM2xXzAE+vhk6n/OK8jad28ecC2cUUr9QbowrEKfQV6I9pa7ia5Q7uZaosyhz6n5yZd3XfqY6OGyUO6jBfC/mwbiulDn1+wSxrex54pzMCvsyzRGWiE4fkbQfV8u6T8xolXP7ndbBxxvjEW40ytWAMKPutX+3/tV3bc6msI1lglkiYJ8WAZ87E3SepH1hEvw+zkXI30sQuL4QvqtFB4nsmctizYhrWabGSe2LU2F/OwWsy7X9LRbG7bJ2cnaMW6JOeY22vZvqvvZQpbcZ1+xq3yKY6xv08t+tvu6gyJ8FZ+4TjH8c49PaNDcxKO47wfCnL/R2vXOFyfvDvCH9dPqMjNlTlh1cTNMKW3XuOPH8n0QnU6fQJ6ZsyfnD9eI6MG7HteTcWT7H/AvfU+cydzPttzmT+5z4F/sVB43POK0xs0rHUuWRKLcvUh9gmpyV4XEnFDek2q5HpNGCQFvBOau3fTblavcx6j28LeX/Su6BvlHKYRP4Yd6Kgr39NI9onxwB/ztTdO8azyXC8UqKrIM539P4SxyMe2JhuyPG7G+8Ra55UewP+Nl2Y9Qe47qor+U9lhqXhN9pTArv12mOJSDZcllFgfezp2n32T9B29PDdRAjBWtpPCScs4x/QkvYfsH4xwR7nJXv9rXYhG0ix9Oqx9nbS53N2ZwLJ5l9Rey5vrXJ1DGM/VCPMwZJ+TNeQ14I7mniHuod7nXqHMr+lUvNvDHXkf4n35c9Y+QVMmcfQzEt60XpW9CnqnB8oPkqYvsq51LRNyvRJ+ggujg2Isd2P6JtAxDfeQSKHH853+j/ycq4wEGLPMM8EegOPzgocc1zPYO//LrOd/UWeY/Npc6XPmuO+HhJhePl52Mts0N8FiR9Ubx3ABzXm7F+17z2PUwdNDfajIXOijhmzIxYL68/aUwLn21MCR9sjAttMIYEZxg1AYFGJ6vtn9bXkh6VYly1h30lJWJ/KowPNc7Z3Uf2fuRZDIk6g/5hx2QPndEZi4Nj2U93DhOyGFcw+xUZg7yy4rzYzvPKcUV7zFgn4/20p3Pa9yz1BnE7fST2A4zLOKF2mbE0rf/RGrez+vPkalrYrsN4z7msjEGSu49nhhiXe17PosaMN2l+kHn1Usdb2pPEOaGVto9RYnsD8baxiDCaWgOzS1uD4ouQ7BoJX1ssvGydEGBLQzdjzcHu9n1Pd7F/Pb9rp6+mNFh21uZaHsryDU4p7FzieCR0YR6K9w+A7eYoM2/OfMtN0R+J7/qyMSNiiuzlcmNIaOB/0kthN1w5ogv3sX473ZgrWOAx1Z/0+xp8tqF34H70Djig85xHJ53BDV1MHEN/h3Eb4pZpBeeV46dG9lyNzyrlq6LNo24nxxX1Ee0s7e9l7XkS1hL0Cv5OzsN53e9cG+pz4pnBcT/qxTg/bQH3PvUa9z5l/9bl5joQN9EW0AYMjf9JdU8Xn081jlDMHhnZT8xNE9+zxiqiU48tzqGJl7teSTgX/EQKvMbaYRsaAPuEcBhjQuBZ5LndYrWMF5/Vz1Jq6dV5oNfztulBO4zhwfCcFITivf2RsqZOaw2VA6wp6Njv6R8S29Mz1hh2knXzacYc+ax/U14F4i/OF+WczVrHOlmHb9ESchgDo3+SfX9COfLGC+6mDBmjbwndLZh6reyx95BvfV5jxozBjE49LnI9r/KjDiGm4ZrxPAxPPKKzayfnHVO58qwwHsQ9zRh0/+h9giUPasyBMVTFqn3NGDBjwa9fZs5u5Yxn9hhMEPzZ6LdJ53RS7xcaz2o+mjNDyQsUZ4zZ429JSbXstsSGf1mOimPD4P9utmJ3nxdSNb9tZ119A+vAHW22YSEIfasAwYLNPa4JQcpn3VBxeCi8b5Q93xxEnpSbjEjnP8Uy/90r2hhUnGLMOp9vPG76tvSxFPOsVp+LcTadaxu8ByPiTmFmIf3bsyK3n+U6qf4OcTd7E1tCDmieekj8Aa2TYRyddRpNodvQT3x/2lbaAsqPXJLUHbOrWtVuNgRsEhtwVG0vX9fcWAV95ZNmTbT4TsSYlDHXh37Z07JeCy+B5mOZ4ye+4ezdrs6NojtXgHmiLOMuzY8kybmOMvptDTIKo/i9Pa8Nr3evyUH4lnK4F2XC9Vqq1ujYn5XracHsN0VpjQh7KVij6fN8KtzvZaKubQL8n82Ad1ff/UaCT5ffs+95yZ74gj1w3O9V9o8E52/Wi77exSGHtHZ2dNI59a844/qBgeYcVc4IJzee5pyyGHdolf+f1/1JHUEZjU49qrqYuSfWM0wUO0H9c2ezKT9iG50zLnt3WCLj9odVL/FnHhxq4k/qI/rNwxJ+VltDnTe70pzr/cgws6aLtRWcvX5J+nnxETep/8SzS13DGeSZxq3E9l8bhj2843t3vjfizeS9jSg43A9By/PhXCgY5qF42Bck6nq4X0wza3VmCb58PEVxY8qRHojdXAuvCUFnDYdR/HtlH2Z0bcqw3abz4ut8NqDWtVb1PWXfJ0Bsazp0jjnrZzlvmX7uVZWteFCwzfMTzZjAiKSTaocvK2xT/hL6WW+Iv/vCZHMNWM+mPUPk6wncJDbhiOoL6m+dhz3GxKYdPi/xDLFRB+5nnRZnvbNvbEjsMYxLPaufaX6jyR/BtWbMg8/18P0e5balqu/ZK5RqzNZ+oSRj2ka3ER3U8b07DXOPiF1bhWqMQZdzYxDzeSUcj8mevzEK9icTkba3J3J39IbzcXnu1mj4PpuG6lMjUdo6FPZnEmHt6v/y75W9jxGZnGa7cXcX1+eCbU7KPj+j+ZN611Y0+f2AMYltuKLEjCuQZ5Y+Lm3ulFyTC+DSglZczjxS9inlBdB4fIW5Z8lrwhqBBRNMXU+c2RS6VXFgmfGBYJ9T6iswLsFcNm0of4/2gLqHZ4P/J04lD+KYlDMYHn8CvUN2oTloHy6Tv03OUHJb8v0niB/SJ/iA4h
v2UqYYVyoHX4ZxExKNqa/aDO8L/F2W7vYRkS8WofHMJCQf6I6k/Q3I3dtH+xetY0O09iD1UE80YRpKtw6AzyMm/0z8oipEfFxK3o2fjCDH7+Ly9jOS3RnG/B2M7/ULPo5xyZwl3oaLg35Ek/8BDAj7BdNFxlfLGecakO+UZ/wv/UxeYMYnpxe0qd6gPaWciROJZygPYk3GH+hrMb/CujTWIjDXTRzYGPSF2lf6VLQHPC+0vR2xhcdHmmeDen1q3nkMjT2BAZGH0S9c7ErADlxTfVZzgKzp5Z5oDjygtqrIeEl74th7J/r+tGCKq377vT0G+d0Y81YpEtd1AbnAmAM3PknTvpW8jc3w6uMPW6rrLduatBsSt9cfLDkwEM5H5QxMDIV3lRve9X4/2OsD83/v3hed+FiZ8Z7qHeZQmoJ2qG/b5P+D6J3DYsNOYWxyKyZltGF2uSmHO8QePjzElA1nqhNPEqtQ9stvMP0jzeu2mDhxwcR2HMPaBtFNI5KPahyYmKjG9TEGRB1S7Mr1oq6hLWBdxBXkK+5u2pnrq029Mjz+F83rjEyQz5V6Rs8debTI6zRRzlaNbaP6K+x/DDO6MabwYohRkXZhz1ssnayTQ+/L+rwHMrb2gPXaCHKSvGebHFrpsSB6TtiXpcje3qw9bjaL5wj9nXs8wr0Wxm03HoxX7isj3Q3D4nXX75V9rDGijvz4rNdkzSDnmDMOSz3Eud69Aw+iX9hRDIw6prrnSrF3VzBemXYOs0rbFGewlv/ZCWYcgPJmjXKHb8q9T93z2W3Au1eaa3RrT7HT6acF529FtQ95+p9Apc87GJ7wS3s8+Zzgy6Oi106LfuPZknPQVewO8+uUf+xpraug/SHG+VN2m9qjCSnnxFZtFZ3/AYjfYowh8DfS3v/t95V/bvvlkW9mbuyB5C+7wXpFOKy1fvMuvF5r83e8k3Yu92ALAq5PRmeH19CO1wQDzbc/lQTXo8la12942576PbKXz1Ymn/Eo9SJxMbmqm4K3aw6oXjB4T//vFPMwxjkm+QymF5q47irmn9LPae0U9QplTf3MOmZe1EOM1XM9GG97Z5bJsbTiBhOf8NyQ+21myWnkWp9AtPcIBHgXosj3YbHbzDe2oTmUvJObFe8zRzwy6TgukfM3Ma1V7NNpTEg1z+JUsT+s8eof+jPq7NtE569WnUafUfDESS/DEnFBtkEe+faZkZsT1tQg/P1ieI4Pgq3cb/b/Y32ybJ7GG8nf5R/th5iF8jMp9vs7XvO6OeJB1o+Hv1UIa70/DE/ruP9e9uk14UbDCcb8iDcr7ctRZl+MHqHrcAe5rOW09gk+jO6+36IpcBdGJ5/WXMXMIsF8xW2q8xnDZxySsRyz5rhNcXpHPIG4hTqfdeaU/0dzTT1OffWc6KdxqbvEL92onIfZjtvh65WNUenfKZ5kvJ41amat4Tqtdesta8IaRtZw0caSy29w5Cmtfenm2Ka5z3JjidhcM5bpa8Rf0PcXldjH+MyNP5W8sQ5Br+TCa1Bgq+iQIf872dgWJt4fu6UGkStKYWsKOGy1WLQO3HgqYXPZj4MR8WIhvHJd3xgWi+d/I3uH4b44ymg5lWncpjaKNVv0TRj/pl9YHHCH2ICtGBrRJj7UQbV15OIkruwVsE/0TqvGXihbypp6mjJjPeH0IrNeh3k95mpZ70M/i/pn472mHebPPi3r0NvnGO4WTHOz2AXW2wR27oZe0Uswtxsx0kl9jnWy9BeIlSodH2gcoYf/VgyMPIqhYpcYi+pq36xx8XLjQ5SKf55lu7stolPjB5yP4hlh6+3d4r8q6JFM5T4MejWHuv6QEe2q+1fysT4eNypgVR4i1pXBdW0srN7WayzPePsELSs436dtOgLuS4NXnP2v/6ncnUZwidihFfHGON0jrImtda7XehfWDJm9aQ8ixXsespw3ab/I4IhTGt8nv+mwuKMYlXRWscqbM814C/czY8Ad9cus7xkSR9/0vMpeMUy9ic0/vcXEKFwzrRdLaMPlOW24tVH0tt/HiL/oevSP/Ub1F3ur+0bsVm5c6kTWV1SJfuTFmu9LC06gOuBlFFlf11wE84TldrElXs8iyqPfWa88+7O2McGf+t2ahNhlFYhbXwO3+E3egwJ3GrE+6f8nOXk9EFPuXpGNqE2CMRcXw5bms8djlN+18du7aq+t711J8Iq13/qfyD7EqGqONUae556nb8saCuZs+ZmZT6tzbxQbuFhjUrk2WQPbbOUN589099uKRv/NGBh9CBMz2zAo7kfcP7BNOQypV1bdZM5LoK6nb0SOpck559VPYxzmesHxfxZMs3Y+sPomcw2INclBOTCgFVUX7UO591q0hO4yc+3dTH44xu6ag7+TPb9W8UCD7yb5nOvFNm0UH/wIKlyva20a8+Xs/SnyWIgE6yVwXRKL4IWZSPysi+ZTA9/OBTncrX0Dl9r/DV7ti24IDnYty/gl6ttq5O5rgWtgGIyaABScGYgWzID/g7L/UxzP/ruyTzAm5os9OlNkLFR809W1QTEOzypr9QdG7dFcbUvoHp3ZQf78EqfoJPvz8n3Xif7Zj3Hpx0Uu52Wft2ltGPsppmeexLsz2vDWTPF1W1rx/IQ2/LV/m+CYo5iQcVrjEZNzzNkVN9S1iR/WhqXXtOGD2fI7l7XJ2pzHlIxWjI49j8miz//a1/R/72lpE1tzBt0Dtqi8Kx3LFKPW+36JevffxTf/AiXWxSiyca7L05onz/Caj5iQIfC9ORVhH+Url6//8xmM27RaJ4ZsELzyv9X1/+qyv5n8QcC6ApScFH3/WB5sCS407p6AoW3XIvh50WHZrnW2f3POtezpebRN7MFqcH2r8WPGFVjP0tXxFUal7hTMsQvd3d+jwf2N+JS70M39hdZekONxeMIB0ekHMa1oN27qvg9d/Zch1/NpdLeJXUjYjcm5+7SO/traH3BT42HRG7sEz4vNTj0gNnMnBsbs0h7EcRm7ZQ134+bGPaKXyJ8kfyv0a3ltk/yNbbi6eieurtoveJ545wfUuj4V2X+oOUPy/lQ5V+q+oY/CvDgxc4HarOsRdVFfuKbFw7UoAc6HkmCdFwnviSGMma23W7wD/lNdbXs8fr7z7TREr6sUeWfDu9IXaW92RfG3A+D3nKxrnf9Or3/B+/7H9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9cf1x/XH9X/9+p/wb6cWG/zz/RwdsGreW369b7P8+jOnLcsv3B/1+PV+p/fyCz8/J1rvffRXb9jZ8UanO5l/zBLNH2+/l1fm+PzmPvrC/Q2WX++jLNUX7r0sN1y4t4T85v4qYHnH/XT8+nyn39x7/Hrf2ePXn/fy6PjCct8J5y0X/t1w1PL/y7/pv956/ObjhP967/G4xdpx7/OJ5XTHffVyjwuf+YY53js77s9bzGXhvznV7WttoXgu/KoswIUfsXT6zZ/t9F9L4bfyw9EL38tD7qPbX/JYdvTCDnBE77ywKhHRN3ZsSUtJ519X6CrZWW3t96dlk5y/8Bkv3HvslA3T/rV8l
pfcgPY/FX13xYUNUx396/311XLf/mevRUXHn+10DBEdf6rTRwi5sDG8MeeC1ESUHW+v9/+T//0vFFslQA==\"\r\n \r\n if old == True:\r\n s = Icon7\r\n else:\r\n s = Icon8\r\n\r\n return s.decode('base64').decode('zlib')",
"def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)",
"def get_icons():\n ICONS = {\n \"http://files.heuritech.com/raw_files/surfrider/bottle.png\" : \".mot/resources/bottle.png\",\n \"http://files.heuritech.com/raw_files/surfrider/fragment.png\" : \".mot/resources/fragment.png\",\n \"http://files.heuritech.com/raw_files/surfrider/other.png\" : \".mot/resources/other.png\"\n }\n\n home = os.path.expanduser(\"~\")\n if not os.path.isdir(os.path.join(home, \".mot/\")):\n os.mkdir(os.path.join(home, \".mot/\"))\n if not os.path.isdir(os.path.join(home, \".mot/resources\")):\n os.mkdir(os.path.join(home, \".mot/resources\"))\n\n for k,v in ICONS.items():\n path = os.path.join(home, v)\n if not os.path.isfile(path):\n wget.download(k, path)\n print(\"\\ndownloaded to \", path)\n return [cv2.imread(filename,-1) for filename in [os.path.join(home, \".mot/resources/bottle.png\"),\n os.path.join(home, \".mot/resources/fragment.png\"),\n os.path.join(home, \".mot/resources/other.png\")]]",
"def get_icon(self):\n raise NotImplementedError",
"def _trailingIcons(self):",
"def update_icon(self, _widget, _callback_data):\n\t\t\n\t\tprint \"in update_icon for \", self.name\n\t\tself.icon = self.__window.get_icon()\n\t\tself.icon.save(self.imgpath, \"png\")\n\t\tif not self.pile is None:\n\t\t\tself.pile.update_child_icon(self)\n\t\treturn",
"def icon(self):\n return None",
"def icon(self):\n return None",
"def get_icons(self):\n return self.data[\"icons\"]",
"def IcondirEntry( self, image_path, dataoffset, N ):\n \n img, imgdata, mode, bpp = self.LoadImage( image_path, N )\n \n bWidth, bHeight = img.size\n bReserved = 0\n wPlanes = 0 \n wBitCount = bpp\n \n if wBitCount <= 8 and mode == 'P':\n try:\n palettemode, palettedata = img.palette.getdata()\n lenpal = len(palettedata)\n bColorCount = lenpal // 3\n except:\n log_err = 'Image Malformed --> No correct palette for image %s\\n' %image_path\n return log_err\n else:\n bColorCount = 0\n \n dwImageOffset = dataoffset\n\n ## Generate BITMAPINFO header.\n data = self.HeaderBmpinfo( bWidth, bHeight, wBitCount, imgdata, bColorCount )\n \n ## Write the palette.\n if mode == 'P':\n if palettemode in ['RGB;L', 'RGB']:\n for x in range(0, lenpal, 3):\n ## B, G, R, 0.\n data += bytes([palettedata[x + 2]]) + bytes([palettedata[x + 1]]) + bytes([palettedata[x]])\n data += pack('B', 0)\n \n elif palettemode in ['RGBA;L', 'RGBA']:\n for x in range(0, lenpal, 4):\n ## B, G, R, A.\n data += bytes([palettedata[x + 2]]) + bytes([palettedata[x + 1]]) + bytes([palettedata[x]]) + bytes([palettedata[x + 3]])\n \n ## Write XOR mask (Image).\n data += imgdata\n \n ## Write AND mask (Transparency).\n rowsize = self.CalcRowSize( 1, bWidth )\n masksize = rowsize * abs(bHeight)\n\n if mode == 'RGBA':\n data += MASK().ComputeANDMask( imgdata, bWidth, bHeight )\n elif mode in ['RGB', 'P']:\n data += pack('B', 0) * masksize\n \n ## Increment data offset.\n dataoffset += len(data)\n\n ## Calculate size of icondirentry + image data.\n dwBytesInRes = len(data)\n\n ## Define correct dimension, 0 means 256 (or more).\n if bWidth >= 256: bWidth = 0\n if bHeight >= 256: bHeight = 0\n \n # Pack the icondirentry header.\n icondirentry = pack('4B2H2I', bWidth, bHeight, bColorCount, bReserved, wPlanes, wBitCount, dwBytesInRes, dwImageOffset)\n\n return icondirentry, data, dataoffset",
"def make_image(self, path):\n\t\treturn self.ui.get_icon(path)",
"def icons(users, distance):\n\n # It would be pretty cool to put user thumbails where points are.\n # but i'm still not sure how to do this yet.\n images = []\n\n try:\n print 'getting images..'\n for p in users:\n print p\n f = p.image\n img = imread('image.tmp')\n images.append(img)\n except Exception as e:\n print 'got an error...'\n import traceback\n etype, evalue, tb = sys.exc_info()\n print yellow % '\\n'.join(traceback.format_exception(etype, evalue, tb))\n ip()\n\n (W, H, _) = shape(img) # thumbnails should all be the same size\n count = len(images)\n\n pl.figure()\n\n P2, _ = mds(distance, 2)\n X,Y = P2[:,0], P2[:,1]\n\n ## XXX: not a great transformation b/c we might stretch more in one dimension\n def N(x):\n \"force x to fit in interval [0,1]\"\n x = (x - x.min())\n x = x / x.max()\n assert all(x >= 0) and all(x <= 1)\n return x\n X = N(X)*475\n Y = N(Y)*425\n\n figimages = [pl.figimage(img, xo=x, yo=y) for img, x, y in zip(images, X, Y)]",
"def processIconFilename(self):\n\t\tself.iconFilename = self._getVal(64, 2)",
"def getNewsIconURL(newsBrain):",
"def getimgs():",
"def get_icon(self):\r\n return get_icon(self.ICON)",
"def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''",
"def icon(self):\n ret_icon = self._icon\n if self.player_name == \"lower\":\n ret_icon = self._icon.lower()\n if self.is_promoted:\n ret_icon = \"+\" + ret_icon\n return ret_icon",
"def __make_icon():\n icon = pygame.image.load(str(PurePath(\"res/Images/bird_wing_down.png\")))\n return icon",
"def showEmoticonList(self):\n print \"Guess what? No emoticons. But I'll put in a random one for you\"\n self.appendImageAtCursor(\"throbber.gif\")",
"def icon(self):\n return self._metadata[2]",
"def get_icon(self) -> Dict[str, Any]:\n player = self._last_sessionplayer\n assert player is not None\n return player.get_icon()",
"def get_icon(self):\n if self.verb == \"C\" or self.verb == \"A\" or self.verb == \"K\":\n return \"fa-comment\"\n\n elif self.verb == \"I\" or self.verb == \"U\" or self.verb == \"O\":\n return \"fa-users\"\n\n elif self.verb == \"L\":\n return \"fa-heart\"\n\n elif self.verb == \"F\":\n return \"fa-star\"\n\n elif self.verb == \"W\":\n return \"fa-check-circle\"\n\n elif self.verb == \"E\":\n return \"fa-pencil\"\n\n elif self.verb == \"V\":\n return \"fa-plus\"\n\n elif self.verb == \"S\":\n return \"fa-share-alt\"\n\n elif self.verb == \"R\":\n return \"fa-reply\""
] | [
"0.70672655",
"0.65587485",
"0.62278324",
"0.6146548",
"0.613125",
"0.61307836",
"0.6071784",
"0.60547864",
"0.59524333",
"0.59221816",
"0.59087664",
"0.5892237",
"0.5814968",
"0.5772996",
"0.5772996",
"0.5770113",
"0.57606995",
"0.5742103",
"0.57360274",
"0.57078516",
"0.5705857",
"0.569348",
"0.5689409",
"0.5654394",
"0.5641549",
"0.56388307",
"0.5601957",
"0.5550341",
"0.5545227",
"0.5529701"
] | 0.68753237 | 1 |
A generic base class for electrical units. The class contains attributes to store the magnitude of the key quantity and also the respective frequency associated with the electrical characteristic. Where a series of frequency values is required, it is expected that this will be achieved by using a list containing objects of the appropriate class. | def __init__(self, freq: int = None, freq_unit: str = None, **kwargs):
self._freq: int = freq
self._freq_unit: str = freq_unit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, value, key_length=1):\r\n super().__init__(self.key, value)\r\n self.key_length = key_length\r\n self.items = OrderedDict()\r\n self.parse()\r\n\r\n self._PlatformTailNumber = None\r\n self._PlatformHeadingAngle = None\r\n self._ImageSourceSensor = None\r\n self._SensorLatitude = None\r\n self._SensorLongitude = None\r\n self._SensorTrueAltitude = None\r\n self._SensorHorizontalFieldOfView = None\r\n self._SensorVerticalFieldOfView = None\r\n self._targetWidth = None\r\n self._slantRange = None\r\n self._SensorRelativeAzimuthAngle = None\r\n self._OffsetCornerLatitudePoint1 = None\r\n self._OffsetCornerLongitudePoint1 = None\r\n self._OffsetCornerLatitudePoint2 = None\r\n self._OffsetCornerLongitudePoint2 = None\r\n self._OffsetCornerLatitudePoint3 = None\r\n self._OffsetCornerLongitudePoint3 = None\r\n self._OffsetCornerLatitudePoint4 = None\r\n self._OffsetCornerLongitudePoint4 = None\r\n self._FrameCenterLatitude = None\r\n self._FrameCenterLongitude = None\r\n self._FrameCenterElevation = None\r\n self._CornerLatitudePoint1Full = None\r\n self._CornerLongitudePoint1Full = None\r\n self._CornerLatitudePoint2Full = None\r\n self._CornerLongitudePoint2Full = None\r\n self._CornerLatitudePoint3Full = None\r\n self._CornerLongitudePoint3Full = None\r\n self._CornerLatitudePoint4Full = None\r\n self._CornerLongitudePoint4Full = None",
"def units(self, key):\n \n # Strip any operators\n _, key = get_operator(key)\n \n # Fill out aliases \n if key in component_from_alias:\n key = component_from_alias[key]\n elif key == 'E':\n key = 'electricField'\n elif key == 'B':\n key = 'magneticField' \n \n return pg_units(key)",
"def __init__(self, chars=False):\n self.chars = chars\n self.lm_dict = defaultdict(lambda: defaultdict(float))",
"def __init__(\n self,\n chi1=chi1_default,\n chi2=chi2_default,\n m1=m1_default,\n m2=m2_default,\n chip=chip_default,\n thetaJ=thetaJ_default,\n alpha0=alpha0_default,\n distance=distance_default,\n phic=phic_default,\n fref=fref_default,\n mode=1,\n freqs=[\n flow_default,\n fhigh_default,\n -1],\n freqs_from_range=True,\n convert_units=True):\n self.chi1 = chi1\n self.chi2 = chi2\n self.m1 = m1\n self.m2 = m2\n self.distance = distance\n self.thetaJ = thetaJ\n self.alpha0 = alpha0\n self.chip = chip\n self.phic = phic\n self.fref = fref\n self.mode = mode\n\n # Perform unit conversions, if requested\n if(convert_units):\n self.m1 = self.m1 * lal.lal.MSUN_SI\n self.m2 = self.m2 * lal.lal.MSUN_SI\n self.distance = self.distance * lal.lal.PC_SI * 100 * 1e6\n\n # Generate frequency array\n if(freqs_from_range):\n flow = freqs[0]\n fhigh = freqs[1]\n n_freqs = freqs[2]\n # If n_freqs<1, then assum dfreq=1.\n if(n_freqs < 1):\n self.freqs = np.linspace(flow, fhigh, (fhigh - flow) + 1)\n self.n_freqs = len(self.freqs)\n else:\n self.n_freqs = n_freqs\n self.freqs = np.linspace(flow, fhigh, self.n_freqs)\n # If freqs_from_range is false, then assume that freqs specifies a list of frequencies\n else:\n self.freqs = freqs\n self.n_freqs = len(self.freqs)",
"def __init__(self):\n # Assign -1 to conform to the specification of the typing hint.\n # Since the 'key' will not be called and used before it is assigned,\n # the assignment here will not have a bad effect.\n self.key = -1 # type: keyType\n # The dictionary support the different values with the same key.\n self.values = [] # type: List[valueType]",
"def __new__(cls, f, time_unit='s'):\r\n\r\n tuc = time_unit_conversion\r\n scale_factor = (float(tuc['s']) / tuc[time_unit])\r\n #If the input is a Frequency object, it is already in Hz:\r\n if isinstance(f, Frequency) == False:\r\n #But otherwise convert to Hz:\r\n f = f * scale_factor\r\n\r\n freq = super(Frequency, cls).__new__(cls, f)\r\n freq._time_unit = time_unit\r\n\r\n return freq",
"def __init__(self, **kwargs):\n self.singleComp = True if 'gamma_nl' in kwargs else False\n self.__checkArgs(kwargs)\n\n # Define the output units and actual units of the parameters\n # We'll make them strings to make life easy, and because we want them to\n # be reported int the right way\n self.ureg = pint.UnitRegistry()\n m_e = self.ureg.electron_mass.to_base_units().magnitude\n hbar = self.ureg.hbar.to_base_units().magnitude\n self.gOutput = \"millieV * micrometer**2\"\n self.rOutput = \"millieV * micrometer**2 * hbar**-1\"\n self.gammaOutput = \"picosecond ** -1\"\n self.gammaNlBase = \"meter**2 * second**-1\"\n self.mOutput = \"electron_mass\"\n self.charLOutput = \"micrometer\"\n self.charTOutput = \"picosecond\"\n self.gammaNlOutput = \"millieV * micrometer**2 * hbar**-1\"\n self.mBase = \"gram\"\n self.gBase = \"gram * meter **4 * second ** -2\"\n self.rBase = \"meter**2 * second **-1\"\n self.gammaBase = \"second ** -1\"\n self.charLBase = \"meter\"\n self.charTBase = \"second\"\n\n # Read in the keyword arguments\n for (k, v) in kwargs.items():\n setattr(self, k, v.to_base_units().magnitude)\n\n # Set mass. We read in mass in units of electron mass for convenience,\n # but it must be converted to SI units\n # self.m_scaled = self.m\n # self.m = self.__class__.__m_e * self.m\n\n # m is now read in as a pint quantity. We don't need to scale it up by\n # the electron mass, but we do need to find the scaled mass\n self.m_scaled = self.m / m_e\n # Read in k or set to default\n self.k = kwargs.get('k', hbar**2 / (2 * self.m))\n\n # Define our characteristic length, time, and energy scales.\n # If t' is the (nondimensional) time variable used in the\n # nondimensionalised GPE, then t = charT * t'. For example, R' is the\n # stimulated scattering rate used in the normalised GPE, so R = charR *\n # R'. If they are not provided, we will set them to the default, which\n # is the scaling that makes k=1 and gamma'_C = 1.\n\n self.charT = kwargs.get('charT', 1.0 / self.gamma_C)\n if 'charT' in kwargs.keys():\n self.charT = self.charT.to_base_units().magnitude\n self.charL = kwargs.get('charL', np.sqrt((hbar\n * self.charT)\n / (2.0 * self.m)))\n if 'charL' in kwargs.keys():\n self.charL = self.charL.to_base_units().magnitude\n # A characteristic energy\n self.charU = hbar / self.charT\n self.charg = (hbar * self.charL**2) / self.charT\n self.charR = self.charL ** 2 / self.charT\n self.charGamma = 1.0 / self.charT\n self.charGammaNl = self.charL ** 2 / self.charT\n # TODO: Check\n self.chark = (hbar * self.charL**2) / self.charT\n # This may not be required - the P term in the GPE is phenomonological,\n # and the experimentalist probably only knows it in terms of Pth\n self.charP = 1.0 / (self.charT * self.charL ** 2)\n\n # Scaled parameters - these are the ones to used in the\n # nondimensionalised GPE\n self.g_C_scaled = self.g_C / self.charg\n self.gamma_C_scaled = self.gamma_C / self.charGamma\n self.k_scaled = self.k / self.chark\n self.g_R_scaled = self.g_R / self.charg\n self.gamma_R_scaled = self.gamma_R / self.charGamma\n self.R_scaled = self.R / self.charR\n # Compute threshold pump power for the normalised GPE.\n self.Pth_scaled = ((self.gamma_R_scaled * self.gamma_C_scaled)\n / self.R_scaled)\n # Compute threshold pump power for unnormalised GPE. We can get this\n # from the scaled one.\n self.Pth = self.charP * self.Pth_scaled\n\n if self.singleComp:\n self.gamma_nl_scaled = self.gamma_nl / self.charGammaNl",
"def __init__(self, f, E, N, H=18.0, R=34.0):\n super().__init__()\n self.freq = f # Hz - Frequency\n self.E, self.N, self.H, self.R = E, N, H, R\n self.X2 = E*E + N*N + H*H + R*R",
"def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units",
"def __init__(self):\n self.freq = {}",
"def __init__(self):\n self.key2value = {}\n self.key2time = {}",
"def __init__(self,units=None):\n self.__units = units",
"def __init__(self, freq=None, **kwargs):\n if freq is not None:\n assert isinstance(freq, np.ndarray)\n self.freq = freq\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, value=None, unit=None):\n # When no unit is specified, bend over backwards to handle all one-argument possibilities\n if unit == None: # one argument version, copied from UList\n if is_unit(value):\n # Unit argument creates an empty list with that unit attached\n unit = value\n value = []\n elif is_quantity(value):\n # Ulist of a Quantity is just the Quantity itself\n unit = value.unit\n value = value._value\n elif _is_string(value):\n unit = dimensionless\n else:\n # Is value a container?\n is_container = True\n try:\n i = iter(value)\n except TypeError:\n is_container = False\n if is_container:\n if len(value) < 1:\n unit = dimensionless\n else:\n first_item = next(iter(value))\n # Avoid infinite recursion for string, because a one-character\n # string is its own first element\n if value == first_item:\n unit = dimensionless\n else:\n unit = Quantity(first_item).unit\n # Notice that tuples, lists, and numpy.arrays can all be initialized with a list\n new_container = Quantity([], unit)\n for item in value:\n new_container.append(Quantity(item)) # Strips off units into list new_container._value\n # __class__ trick does not work for numpy.arrays\n try:\n import numpy\n if isinstance(value, numpy.ndarray):\n value = numpy.array(new_container._value)\n else:\n # delegate contruction to container class from list\n value = value.__class__(new_container._value)\n except ImportError:\n # delegate contruction to container class from list\n value = value.__class__(new_container._value)\n else:\n # Non-Quantity, non container\n # Wrap in a dimensionless Quantity\n unit = dimensionless\n # Accept simple scalar quantities as units\n if is_quantity(unit):\n value = value * unit._value\n unit = unit.unit\n # Use empty list for unspecified values\n if value == None:\n value = []\n\n self._value = value\n self.unit = unit",
"def __init__(self, timeseries, freq, ch_name, units, start_time, filename=\"\"):\n self.timeseries = deepcopy(is_valid(timeseries, np.ndarray))\n self.freq = deepcopy(is_valid(freq, (int, float)))\n self.ch_name = deepcopy(has_size(ch_name, self.ch_amount, \"unknown\"))\n self.units = deepcopy(has_size(units, self.ch_amount, \"[]\"))\n self.start_time = deepcopy(start_time)\n self.filename = deepcopy(is_valid(filename, str))",
"def __init__(self,\n measure_name_fmix: str,\n measure_name_emis: str,\n database: str):\n super().__init__()\n self._measurements[self.KEY_FMIX] = Measurement(name=measure_name_fmix,\n unit=self.UNIT_FMIX,\n database=database)\n self._measurements[self.KEY_EMIS] = Measurement(name=measure_name_emis,\n unit=self.UNIT_EMIS,\n database=database)",
"def __init__(self, termname, keys, ordinal=False):\n\n if not ordinal:\n self.keys = list(set(keys))\n self.keys.sort()\n else:\n self.keys = keys\n if len(set(keys)) != len(list(keys)):\n raise ValueError('keys for ordinal Factor should be unique, in increasing order')\n self._name = termname\n self.termname = termname\n self.ordinal = ordinal\n\n if self.ordinal:\n name = self.termname\n else:\n name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]\n\n Term.__init__(self, name, termname=self.termname, func=self.get_columns)",
"def __init__(self):\n \n # list of all frequencies\n self.freqs = []\n \n # list of frequencies with Z != 0\n self.freqsToSolve = []\n \n # minimum Ck value to be kept\n self.min_Ck = 1.0e-18\n \n # Ck spectrum (list of OneCk objects)\n self.Ck = []",
"def unit_key(self):\n data = self.to_dict()\n return self.generate_unit_key(*[data[key] for key in UNIT_KEYS])",
"def get_derived_unit(registry, key):\n if registry is None:\n return 1.0\n derived = {\n \"diffusivity\": registry[\"length\"] ** 2 / registry[\"time\"],\n \"electrical_mobility\": (\n registry[\"current\"] * registry[\"time\"] ** 2 / registry[\"mass\"]\n ),\n \"permittivity\": (\n registry[\"current\"] ** 2\n * registry[\"time\"] ** 4\n / (registry[\"length\"] ** 3 * registry[\"mass\"])\n ),\n \"charge\": registry[\"current\"] * registry[\"time\"],\n \"energy\": registry[\"mass\"] * registry[\"length\"] ** 2 / registry[\"time\"] ** 2,\n \"concentration\": registry[\"amount\"] / registry[\"length\"] ** 3,\n \"density\": registry[\"mass\"] / registry[\"length\"] ** 3,\n }\n derived[\"diffusion\"] = derived[\"diffusivity\"] # 'diffusion' is deprecated\n derived[\"radiolytic_yield\"] = registry[\"amount\"] / derived[\"energy\"]\n derived[\"doserate\"] = derived[\"energy\"] / registry[\"mass\"] / registry[\"time\"]\n derived[\"linear_energy_transfer\"] = derived[\"energy\"] / registry[\"length\"]\n\n try:\n return derived[key]\n except KeyError:\n return registry[key]",
"def __init__(self, categories, key_value_encoding=None, key_fill_value=None, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='CategoryValue', **kwc)\n\n if not isinstance(categories, dict) or len(categories.keys()) == 0:\n raise TypeError('\\'categories\\' must be of type dict and cannot be empty: {0}'.format(categories))\n\n if key_value_encoding is None:\n # Get the type of the first key\n key_value_encoding = np.asanyarray(categories.keys()[0]).dtype.str\n else:\n key_value_encoding = np.dtype(key_value_encoding).str\n\n want_kind=np.dtype(key_value_encoding).kind\n if want_kind not in self.SUPPORTED_CATETEGORY_KEY_KINDS:\n raise TypeError('\\'key_value_encoding\\' is not supported; supported np.dtype.kinds: {0}'.format(self.SUPPORTED_CATETEGORY_KEY_KINDS))\n\n for k in categories.keys():\n if np.asanyarray(k).dtype.kind != want_kind:\n raise ValueError('A key in \\'categories\\' ({0}) does not match the specified \\'key_value_encoding\\' ({1})'.format(k, key_value_encoding))\n\n if want_kind == 'S':\n self.base_type = ArrayType()\n else:\n self.base_type = QuantityType(value_encoding=key_value_encoding)\n\n self._template_attrs['categories'] = categories\n self._gen_template_attrs()",
"def key(self):\n raise NotImplementedError(\"'key' not implemented for Element subclass\")",
"def __init__(self, items: List[T], min_freq: int = 1):\n counter_ = Counter(items)\n unique_items = [x for x, freq in counter_.items() if freq >= min_freq]\n self._dict = {item: i + 1 for i, item in enumerate(unique_items)}\n self._items: List[Union[str, T]] = [\"UNK\"]\n self._items.extend(unique_items)",
"def __init__(self):\n self.keys = []\n self.values = []",
"def __init__(self, key):\n\n def keys(key, num_rounds):\n \"\"\"Yields the permuted key bitstring for i = 1..num_rounds\"\"\"\n C, D = key[:28], key[28:]\n # Rounds are 1-indexed, so shift array over by one\n left_shifts = [None, 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]\n for i in range(1, num_rounds + 1):\n # Negate each rotation to rotate left.\n C, D = rotate(C, -left_shifts[i]), rotate(D, -left_shifts[i])\n yield self.permute(C + D, self._CD_permutation)\n\n self.key = list(bits_of(key, 64))\n # Permute the key. The permutation discards the parity bits...\n self.key = self.permute(self.key, self._key_permutation)\n self.number_of_rounds = 16\n # A list of the 16 keys K1 .. K16, shifted over by one to allow 1-indexing.\n self.keys = [None] + list(keys(self.key, self.number_of_rounds))",
"def __init__(self):\n super().__init__()\n self.mapping = {}\n self.values = set()\n self.type = 'Categorical'\n self.dimensionality = 1\n self.distType = 'Discrete'\n self.isFloat = False",
"def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}",
"def __init__(self, *args: Species, units=KcalMol):\n super().__init__()\n\n for arg in args:\n assert isinstance(arg, Species)\n self.append(arg)\n\n self.units = units",
"def setup_class(self):\n class SubCosmology(Cosmology):\n\n H0 = Parameter(unit=u.km / u.s / u.Mpc)\n Tcmb0 = Parameter(unit=u.K)\n\n def __init__(self, H0, Tcmb0=0*u.K, name=None, meta=None):\n super().__init__(name=name, meta=meta)\n self._H0 = H0\n self._Tcmb0 = Tcmb0\n\n self.cls = SubCosmology\n self.cls_args = (70 * (u.km / u.s / u.Mpc), 2.7 * u.K)\n self.cls_kwargs = dict(name=self.__class__.__name__, meta={\"a\": \"b\"})",
"def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [None] * capacity\n self.key_count = 0"
] | [
"0.55618083",
"0.54142815",
"0.53817075",
"0.53536946",
"0.53438824",
"0.52844536",
"0.51879114",
"0.51562005",
"0.5135914",
"0.5134475",
"0.51331466",
"0.5132032",
"0.5116857",
"0.51153636",
"0.50826174",
"0.50541294",
"0.50439155",
"0.5021056",
"0.50179166",
"0.50034547",
"0.49949157",
"0.4982116",
"0.49816266",
"0.49753276",
"0.4973889",
"0.4963617",
"0.49473476",
"0.49236602",
"0.49234697",
"0.49199802"
] | 0.5463841 | 1 |
There are two modes. 1. Add an int/float to a Volt() object. Return a new Volt() object with '.volts' that is the sum of 'self.volts' and the passed int/float. 2. Add two Volt() objects together, returning a new Volt() object with '.volts' that is the sum of 'self.volts' and 'other.volts'. | def __add__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Volt(self.volts + other, self.volt_unit, self.freq, self.freq_unit)
if self.volt_unit != other.volt_unit:
raise ArithmeticError(f"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
volt_sum = self.volts + other.volts
return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts * other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts * other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts / other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts / other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def __add__(self, other):\n\t\ttry:\n\t\t\tval = self.val + other.val\n\n\t\t\t# Handle case when self.der or other.der contains None \n\t\t\t# i.e. self or other is a vector of scalars, not of Vars\n\t\t\tlen_self_der_shape = len(self.der.shape)\n\t\t\tlen_other_der_shape = len(other.der.shape)\n\n\t\t\tif not len_self_der_shape and len_other_der_shape:\n\t\t\t\tder = other.der\n\t\t\telif len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = self.der\n\t\t\telif not len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = None\n\t\t\telse:\n\t\t\t\tder = self.der + other.der\n\t\texcept AttributeError:\n\t\t\tval = self.val + other\n\t\t\tder = self.der\n\t\treturn Var(val, der)",
"def __add__(self, other):\n\n self._add_sub_error_checking(other)\n if (self._counts is None) ^ (other._counts is None):\n raise SpectrumError(\n 'Addition of counts-based and CPS-based spectra is ' +\n 'ambiguous, use Spectrum(counts=specA.counts+specB.counts) ' +\n 'or Spectrum(cps=specA.cps+specB.cps) instead.')\n\n if self._counts is not None and other._counts is not None:\n kwargs = {'counts': self.counts + other.counts}\n if self.livetime and other.livetime:\n kwargs['livetime'] = self.livetime + other.livetime\n else:\n warnings.warn('Addition of counts with missing livetimes, ' +\n 'livetime was set to None.', SpectrumWarning)\n else:\n kwargs = {'cps': self.cps + other.cps}\n spect_obj = Spectrum(\n bin_edges_kev=self.bin_edges_kev, **kwargs)\n return spect_obj",
"def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj",
"def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)",
"def __add__(self, other):\n # other is a scalar\n if isinstance(other, (int, float, complex, Fraction)) and not isinstance(other, bool):\n return Vector([i + other for i in self.data], self.column)\n # other is a Vector\n elif isinstance(other, Vector):\n if len(self.data) != len(other):\n raise Exception('Vectors are not of equal length')\n elif self.column != other.column:\n raise Exception('Vectors are not of equal orientation')\n else:\n return Vector([self.data[i] + other.data[i] for i in range(len(self.data))], self.column)\n # other is not a scalar or a Vector\n else:\n raise Exception('Argument is not a number or a Vector') from TypeError",
"def __add__(self, other):\n return add_mps(self, other)",
"def __add__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] + other[i]\n\n return v",
"def __add__(self, other):\n if type(other) == int or type(other) == float:\n x = (other * Ccy.currencies[self.unit])\n else:\n x = (other.value / Ccy.currencies[other.unit] * Ccy.currencies[self.unit])\n return Ccy(x + self.value, self.unit)",
"def __add__(self, other):\n return Vec2d(self.v[0] + other[0], self.v[1] + other[1])",
"def __add__(self, other):\n\t\tif isinstance(other, Value):\n\t\t\treturn Value(self.val + other.val, sqrt(self.error**2 + other.error**2))\n\t\telse:\n\t\t\treturn Value(self.val + other, self.error)",
"def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new particles, since otherwise c = a + b changes a as well!\n p = particles(self)\n p.pos[:] = self.pos + other.pos\n p.vel[:] = self.vel + other.vel\n p.m = self.m\n p.q = self.q\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))",
"def __add__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() + other.get_values())\n return tmp",
"def __add__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] += intensity\n else:\n output[wavelength] = intensity\n return output",
"def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"",
"def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))",
"def __add__(self, other):\n return Vector([c1 + c2 for (c1, c2) in zip(self.components, other.components)])",
"def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf",
"def __add__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"",
"def __add__(self, rhs: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(rhs, float):\n for item in self.values:\n result.append(item + rhs)\n else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n result.append(self.values[i] + rhs.values[i])\n return Simpy(result)",
"def __add__(self, other):\n if len(self) != len(other):\n raise ValueError('As dimensões devem ser iguais')\n\n result = Vector(len(self)) # inicia um novo array do tamanho do próprio\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result",
"def __add__(self, rhs):\n if isinstance(rhs, UTPS):\n return UTPS(self.tc + rhs.tc)\n elif numpy.isscalar(rhs):\n retval = UTPS(numpy.copy(self.tc))\n retval.tc[0] += rhs\n return retval\n else:\n raise NotImplementedError",
"def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result",
"def __add__(self, other):\n if isinstance(other, (int, type(Zero()))):\n if (other == 0):\n return self\n self._check_vector(other)\n return Vector(self.args + other.args)"
] | [
"0.74142647",
"0.72871935",
"0.7271407",
"0.7136929",
"0.7131495",
"0.70471036",
"0.7003873",
"0.6998109",
"0.695118",
"0.687143",
"0.68235624",
"0.6776829",
"0.67384326",
"0.67028344",
"0.66680944",
"0.6656349",
"0.6656332",
"0.6616977",
"0.6614763",
"0.6606953",
"0.6599726",
"0.65970796",
"0.6595671",
"0.65897316",
"0.6588833",
"0.6582857",
"0.65721613",
"0.65484947",
"0.654401",
"0.6528954"
] | 0.8909028 | 0 |
1. Subtract an int/float from a Volt() object. Return a new Volt() object with '.volts' that is the difference of 'self.volts' and the passed int/float. 2. Subtract two Volt() objects, returning a new Volt() object with '.volts' that is the difference of 'self.volts' and 'other.volts'. | def __sub__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)
if self.volt_unit != other.volt_unit:
raise ArithmeticError(f"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
volt_sum = self.volts - other.volts
return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})",
"def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts / other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts / other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] -= intensity\n else:\n output[wavelength] = -intensity\n return output",
"def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)",
"def vd(v2,v1):\n return v2-v1",
"def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})",
"def __sub__(self, other):\n\n self._add_sub_error_checking(other)\n try:\n kwargs = {'cps': self.cps - other.cps}\n if (self._cps is None) or (other._cps is None):\n warnings.warn('Subtraction of counts-based specta, spectra ' +\n 'have been converted to CPS', SpectrumWarning)\n except SpectrumError:\n try:\n kwargs = {'counts': self.counts_vals - other.counts_vals}\n kwargs['uncs'] = [np.nan]*len(self)\n warnings.warn('Subtraction of counts-based spectra, ' +\n 'livetimes have been ignored.', SpectrumWarning)\n except SpectrumError:\n raise SpectrumError(\n 'Subtraction of counts and CPS-based spectra without' +\n 'livetimes not possible')\n spect_obj = Spectrum(\n bin_edges_kev=self.bin_edges_kev, **kwargs)\n return spect_obj",
"def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))",
"def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"",
"def __sub__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value - other.value),\"\")",
"def __sub__(self, other):\n return Difference(self, other)",
"def __sub__(self, other):\n difference = EITFrame(other.__time_stamp,\n self.__dummy - other.__dummy,\n np.subtract(self.__image, other.__image),\n BreathPhaseMarker.TIDAL,\n None,\n None,\n self.__timing_error + other.__timing_error,\n [])\n return difference",
"def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])",
"def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)",
"def __sub__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() - other.get_values())\n return tmp",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new particles, since otherwise c = a - b changes a as well!\n p = particles(self)\n p.pos[:] = self.pos - other.pos\n p.vel[:] = self.vel - other.vel\n p.m = self.m\n p.q = self.q\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj",
"def __sub__(self,other):\n self.numerator=self.numerator*other.denominator\n other.numerator=self.denominator*other.numerator\n resultnumerator = self.numerator-other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues",
"def subtract(self, other, label=None, atol=1.0E-12):\n # check the two solutions share the same grid\n assert numpy.allclose(self.x, other.x, atol=atol)\n assert numpy.allclose(self.y, other.y, atol=atol)\n assert self.values.shape == other.values.shape\n if not label:\n label = self.label + '-subtracted'\n return Field(label=label,\n time_step=self.time_step,\n x=self.x, y=self.y,\n values=self.values - other.values)",
"def __sub__(self, other):\n if not self.unit.is_compatible(other.unit):\n raise TypeError('Cannot subtract two quantities with incompatible units \"%s\" and \"%s\".' % (self.unit, other.unit))\n value = self._value - other.value_in_unit(self.unit)\n unit = self.unit\n return Quantity(value, unit)",
"def __sub__(self, other):\n n = len(self)\n\n if n != len(other):\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n v = zeros_como(self)\n\n for i in range(n):\n v[i] = self[i] - other[i]\n\n return v",
"def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other",
"def __sub__(self, other):\n top = self.num*other.denom - self.denom*other.num\n bott = self.denom*other.denom\n return fraction(top, bott)",
"def __sub__(self, other):\n return self.subtract(other)",
"def __sub__(self,other):\n return np.linalg.norm(self.ngdv-other.ngdv)",
"def __truediv__(self, other):\n try:\n value = -1 / (other.val * other.val)\n total = {self.var: 1 / other.val, other.var: value * self.val}\n return AutoDiffReverse(self.val / other.val, None, total)\n except AttributeError:\n total = {self.var: 1 / other}\n return AutoDiffReverse(self.val / other, None, total)",
"def minus(self, a, b):\n return a - b"
] | [
"0.6897913",
"0.686807",
"0.68031883",
"0.67141724",
"0.6677903",
"0.6609048",
"0.66008663",
"0.6557345",
"0.6422639",
"0.6399935",
"0.6376798",
"0.6290664",
"0.62754256",
"0.62695223",
"0.6264662",
"0.6258906",
"0.6256665",
"0.6246081",
"0.622621",
"0.621305",
"0.61873764",
"0.6182269",
"0.61744285",
"0.6154976",
"0.6132522",
"0.6128664",
"0.61260515",
"0.6110292",
"0.61068183",
"0.6106811"
] | 0.7975307 | 0 |
Multiply a Volt() object. If multiplying by an int or float, return a new Volt() object with '.volts' equal to 'self.volts' scaled by that value. Multiply two Volt() objects together, returning the product of the two objects. | def __mul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Volt(self.volts * other, self.volt_unit, self.freq, self.freq_unit)
else:
if self.volt_unit != other.volt_unit:
raise ArithmeticError(f"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the"
f" same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
prod_sum = self.volts * other.volts
return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __mul__(self, other):\n # print other\n if type(other) == int or type(other) == float:\n return self.scale(other)\n elif type(other) == Vector:\n return self.dot(other)\n else:\n return NotImplemented",
"def __mul__(self, other):\r\n return self.prod(other)",
"def __mul__(self, other):\n if is_unit(other):\n # print \"quantity * unit\"\n # Many other mul/div operations delegate to here because I was debugging\n # a dimensionless unit conversion problem, which I ended up fixing within\n # the reduce_unit() method.\n unit = self.unit * other\n return Quantity(self._value, unit).reduce_unit(self.unit)\n elif is_quantity(other):\n # print \"quantity * quantity\"\n # Situations where the units cancel can result in scale factors from the unit cancellation.\n # To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit\n return (self * other._value) * other.unit\n else:\n # print \"quantity * scalar\"\n return self._change_units_with_factor(self.unit, other, post_multiply=False)",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))",
"def __mul__(self: _TT, other: float) -> _TT:\n return type(self)(str(self.value * other),\"\")",
"def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)",
"def __mul__ (self, other): \n if isinstance(other, Number):\n return self._scale(other)\n elif isinstance(other, Matrix):\n return self._mul(other)\n elif isinstance(other, Vector):\n return self._vecmul(other)\n else:\n return NotImplemented",
"def multiply(self, other):\n from divisi2 import operators\n return operators.multiply(self, other)",
"def multiply(self: T, other: T) -> T:",
"def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})",
"def __mul__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=False)\n\n if mv:\n newValue = self.layout.gmt_func(self.value, other.value)\n else:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj*other\n\n newValue = other * self.value\n\n return self._newMV(newValue)",
"def __mul__(self, other):\n if type(other) == int or type(other) == float:\n return Ccy(self.value * other, self.unit)\n else:\n raise TypeError(\"unsupported operand type(s) for *: 'Ccy' and \" + type(other).__name__)",
"def _mul(self, other):\n return None",
"def __mul__(self, other):\n if isinstance(other, Vector):\n return self.dot(other)\n else:\n raise TypeError(other)",
"def mul(self, a, b):\n return a * b",
"def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)",
"def __mul__(self, other):\n if isinstance(other, (int, float)):\n return Matrix([[self.values[row][index] * other\n for index in range(len(self.values[0]))]\n for row in range(len(self.values))])\n\n elif isinstance(other, Vector):\n return Vector([other.dot(Vector(row)) for row in self.values])\n\n elif isinstance(other, Matrix):\n return Matrix([(other.transpose() * Vector(row)).values\n for row in self.values])",
"def __mul__(self, other):\n\n return self._mul_div(other, div=False)",
"def __mul__(self, other):\n if isinstance(other, numbers.Number):\n # scalar multiplication for numbers\n new_point = [x * other for x in self.coords]\n return self.__class__(new_point)",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps * other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps * other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __mul__(self, othertr):\n res = self.dot(othertr)\n return res",
"def __mul__(self,other):\n if type(other) is Vector:\n return(self.x*other.x + self.y*other.y + self.z*other.z)\n else:\n return(Vector(self.x*other,self.y*other,self.z*other))",
"def mul(self, b):\n self.a *= float(b)",
"def __mul__(self,l):\r\n\t\t\r\n\t\t# multiply\r\n\t\tm = self.multiply(l)\r\n\t\t\r\n\t\treturn m",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def __mul__(self, other):\n\n s = len(self)\n v = zeros_como(self)\n\n if isinstance(other, Vetor):\n # Both operands are Vetors\n # In this case perform a element wise product\n r = len(other)\n\n if s != r:\n raise(VetorError, \"Vetor dimensions are not equal\")\n\n for i in range(s):\n v[i] = self[i] * other[i]\n else:\n # check if other is a scalar\n if hasattr(other, \"__len__\"):\n raise(VetorError, \"Operand isn't an scalar\")\n\n for i in range(s):\n v[i] = other * self[i]\n\n return v",
"def __imul__(self, other):\r\n T = type(other)\r\n # vec4*=scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n self.x*=other\r\n self.y*=other\r\n self.z*=other\r\n self.w*=other\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for *=\"",
"def multiply(self):\n return self._do_calc(self.multiplier)"
] | [
"0.74306023",
"0.7297101",
"0.72146547",
"0.71841425",
"0.71742857",
"0.7134762",
"0.7093439",
"0.70923084",
"0.7059664",
"0.6998358",
"0.6965713",
"0.69609094",
"0.6956492",
"0.69293594",
"0.6906382",
"0.6905946",
"0.68920153",
"0.6883814",
"0.6873146",
"0.68704355",
"0.68520254",
"0.6848586",
"0.68443",
"0.6791419",
"0.6789696",
"0.6779324",
"0.6777122",
"0.67185855",
"0.6711384",
"0.66897416"
] | 0.83590376 | 0 |
The volt attribute setter. | def volts(self, volt: NumType):
self._volt = volt | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_setVoltage(self):\n self.read(\":VOLT?\")",
"def setvoltages(self):\n pass",
"def set_volt(self,volt):\n self.spi_disable()\n self.spi_enable()\n data1=volt*256//self.vref\n if data1>255:\n data1=255\n if data1<0:\n data1=0\n data1=int(data1)\n temp0=AD5601_mode[\"normal\"]|((data1>>2)&0x00ff)\n temp1=(data1<<6)&0x00ff\n data=[temp0,temp1]\n return self.spi_device.write(data)",
"def v(self, v):\n self._v = v",
"def set_voltage(self, value):\n self.write(\":VOLT {}V\".format(value))",
"def set_attribute(self, name, value):\n\n pass",
"def set_voltage(self, v):\n self.environment.set_voltage(self.neuron_id, v)",
"def __set__(self, instance, val):\n raise AttributeError(\"Can't set attribute\")",
"def v(self, v):\n\n self._v = v",
"def v(self, v):\n\n self._v = v",
"def set_voltage(self, millivolts):\n assert 3060 <= millivolts <= 10680, \"Voltage must be between 3,060 and 10,680 mV.\"\n assert self.instr == self.INSTR_EXT, \"Please switch to extended instruction set first.\"\n self.voltage = millivolts\n basevoltage = millivolts - 3060\n incrementor = basevoltage // 60\n code = 0x80 & incrementor\n self.command([code])",
"def __init__(self):\n super().__init__()\n self.metric = 'VOLSMTY'",
"def v(self, v) :\n\t\ttry :\n\t\t\tself._v = v\n\t\texcept Exception as e:\n\t\t\traise e",
"def __init__(self):\n super().__init__()\n self.metric = 'GTVOL'",
"def setVolume(self, *args):\n return _libsbml.Compartment_setVolume(self, *args)",
"def bv(self, bv):\n\n self._bv = bv",
"def volume(self, values):\n self._vart = float(values.get('art', self._vart))\n self._vven = float(values.get('ven', self._vven))",
"def volume(self, values):\n self._vart = float(values.get('art', self._vart))\n self._vven = float(values.get('ven', self._vven))",
"def volume(self, values):\n self._vart = float(values.get('art', self._vart))\n self._vven = float(values.get('ven', self._vven))",
"def setVelocity(self,v):\n if v is None:\n self.v = Cartesian3DVector()\n else:\n if isinstance(v,Cartesian3DVector):\n self.v = Cartesian3DVector(v.x,v.y,v.z)\n else:\n raise CoordinateVector(\"Initializing a particle with the incorrect velocity vector type.\")",
"def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")",
"def pvd(self, pvd):\n\n self.logger.debug(\"In 'pvd' setter.\")\n\n self._pvd = pvd",
"def set(self, v):\n self.components = v.components",
"def __init__(self):\n super().__init__()\n self.metric = 'VOL'",
"def volts(self) -> NumType:\n return self._volt",
"def setVelocity(self, new_vel):\n\n self.vel = limiter(new_vel)",
"async def set_volume(self, vol: int):\n self.volume = max(min(vol, 150), 0)\n await self._bot.lavalink.ws.send(op='volume', guildId=self.guild_id, volume=self.volume)",
"def volume(self, v: int) -> None:\n # changed so it returns to the default volume\n if v > VOLUME_MAX:\n self._volume = VOLUME_DEFAULT\n elif v < VOLUME_MIN:\n self._volume = VOLUME_MIN\n else:\n self._volume = v",
"def lvad(self, value):\n self._lvad = value",
"def lvad(self, value):\n self._lvad = value"
] | [
"0.64910316",
"0.6318452",
"0.6313791",
"0.61006796",
"0.60669094",
"0.60529065",
"0.60273206",
"0.5980553",
"0.59714717",
"0.59714717",
"0.5874598",
"0.5865234",
"0.5841621",
"0.5784756",
"0.5784631",
"0.5780349",
"0.57798725",
"0.57798725",
"0.57798725",
"0.57736105",
"0.5758997",
"0.5757256",
"0.5731055",
"0.5721771",
"0.57133037",
"0.57068104",
"0.5699794",
"0.568901",
"0.56874305",
"0.56874305"
] | 0.7285518 | 0 |
There are two modes. 1. Add an int/float to an Amp() object. Return a new Amp() object with '.amps' that is the sum of 'self.amps' and the passed int/float. 2. Add two Amp() objects together, returning a new Amp() object with '.amps' that is the sum of 'self.amps' and 'other.amps'. | def __add__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)
if self.amp_unit != other.amp_unit:
raise ArithmeticError(f"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
amp_sum = self.amps + other.amps
return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps * other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps * other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n return add_mps(self, other)",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts + other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts + other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n\n self._add_sub_error_checking(other)\n if (self._counts is None) ^ (other._counts is None):\n raise SpectrumError(\n 'Addition of counts-based and CPS-based spectra is ' +\n 'ambiguous, use Spectrum(counts=specA.counts+specB.counts) ' +\n 'or Spectrum(cps=specA.cps+specB.cps) instead.')\n\n if self._counts is not None and other._counts is not None:\n kwargs = {'counts': self.counts + other.counts}\n if self.livetime and other.livetime:\n kwargs['livetime'] = self.livetime + other.livetime\n else:\n warnings.warn('Addition of counts with missing livetimes, ' +\n 'livetime was set to None.', SpectrumWarning)\n else:\n kwargs = {'cps': self.cps + other.cps}\n spect_obj = Spectrum(\n bin_edges_kev=self.bin_edges_kev, **kwargs)\n return spect_obj",
"def __add__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] += intensity\n else:\n output[wavelength] = intensity\n return output",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps / other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps / other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n new_measure = Measure()\n settings = [\"raw\", \"fil\"]\n\n for rf in settings:\n new_measure.hit1[rf] = (self.hit1[rf] + other.hit1[rf])\n new_measure.hit3[rf] = (self.hit3[rf] + other.hit3[rf])\n new_measure.hit10[rf] = (self.hit10[rf] + other.hit10[rf])\n new_measure.mrr[rf] = (self.mrr[rf] + other.mrr[rf])\n new_measure.mr[rf] = (self.mr[rf] + other.mr[rf])\n return new_measure",
"def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __iadd__(self, other):\n if abs(self.T - other.T) > 1E-5:\n msg = \"The two objects being added needs to have the same \"\n msg += \"temperature.\"\n raise ValueError(msg)\n\n if self.ref_value < other.ref_value:\n diff = self.ref_value - other.ref_value\n other._average *= np.exp(self.beta * diff)\n other.ref_value = self.ref_value\n else:\n diff = other.ref_value - self.ref_value\n self.ref_value = other.ref_value\n self._average *= np.exp(self.beta * diff)\n self._average += other._average\n self.num_samples += other.num_samples\n return self",
"def __add__(self, rhs: Union[float, Simpy]) -> Simpy:\n result: list[float] = []\n if isinstance(rhs, float):\n for item in self.values:\n result.append(item + rhs)\n else:\n assert len(self.values) == len(rhs.values)\n for i in range(len(self.values)):\n result.append(self.values[i] + rhs.values[i])\n return Simpy(result)",
"def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)",
"def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)",
"def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf",
"def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)",
"def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"",
"def __add__(self, other):\n return Base40Pitch(self.base40 + other.base40)",
"def __add__(self, other):\n if (len(self.arg) < len(other.arg)):\n summ = Polynomial(other.arg)\n i = len(self.arg) - 1\n for x in self.arg:\n summ.arg[i] = self.arg[i] + summ.arg[i]\n i = i - 1\n else:\n summ = Polynomial(self.arg)\n i = len(other.arg) - 1\n for x in other.arg:\n summ.arg[i] = other.arg[i] + summ.arg[i]\n i = i - 1\n return summ",
"def __add__(self, other):\n return self.getArea() + other.getArea()",
"def __add__(self, other):\n real = self.real + other.real\n pure = self.pure + other.pure\n return Quaternion((real, pure))",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def __add__(self, other):\n return self.add(other)",
"def __add__(self, other):\n return asarray(add(self, other))",
"def __add__(self, other):\n pass",
"def __add__(self, other):\n pass",
"def amps(self) -> NumType:\n return self._amp",
"def __add__(self, other):\n base = deepcopy(self)\n base += other # (+=) == __iadd__\n return base",
"def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))",
"def __add__(self, other):\n\t\ttry:\n\t\t\tval = self.val + other.val\n\n\t\t\t# Handle case when self.der or other.der contains None \n\t\t\t# i.e. self or other is a vector of scalars, not of Vars\n\t\t\tlen_self_der_shape = len(self.der.shape)\n\t\t\tlen_other_der_shape = len(other.der.shape)\n\n\t\t\tif not len_self_der_shape and len_other_der_shape:\n\t\t\t\tder = other.der\n\t\t\telif len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = self.der\n\t\t\telif not len_self_der_shape and not len_other_der_shape:\n\t\t\t\tder = None\n\t\t\telse:\n\t\t\t\tder = self.der + other.der\n\t\texcept AttributeError:\n\t\t\tval = self.val + other\n\t\t\tder = self.der\n\t\treturn Var(val, der)",
"def __add__(self, other):\n raise NotImplementedError"
] | [
"0.7433785",
"0.714936",
"0.710161",
"0.6762452",
"0.67164207",
"0.66605616",
"0.66493183",
"0.65010947",
"0.6368861",
"0.63628244",
"0.6322242",
"0.629921",
"0.62943053",
"0.6284239",
"0.6264756",
"0.62547755",
"0.62463784",
"0.6244722",
"0.62421227",
"0.61927456",
"0.6185316",
"0.6180709",
"0.61767834",
"0.61544883",
"0.61544883",
"0.61525375",
"0.61465",
"0.61420083",
"0.6141474",
"0.6128696"
] | 0.8724514 | 0 |
1. Subtract an int/float from an Amp() object. Return a new Amp() object with '.amps' that is the difference of 'self.amps' and the passed int/float. 2. Subtract two Amp() objects, returning a new Amp() object with '.amps' that is the difference of 'self.amps' and 'other.amps'. | def __sub__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)
if self.amp_unit != other.amp_unit:
raise ArithmeticError(f"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
amp_sum = self.amps - other.amps
return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps / other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps / other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps * other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.amps * other.amps\n return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n output = Spectrum(self.wavelengths, self.intensities)\n for wavelength, intensity in other:\n if output[wavelength]:\n output[wavelength] -= intensity\n else:\n output[wavelength] = -intensity\n return output",
"def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)",
"def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))",
"def __sub__(self, other):\n\n self._add_sub_error_checking(other)\n try:\n kwargs = {'cps': self.cps - other.cps}\n if (self._cps is None) or (other._cps is None):\n warnings.warn('Subtraction of counts-based specta, spectra ' +\n 'have been converted to CPS', SpectrumWarning)\n except SpectrumError:\n try:\n kwargs = {'counts': self.counts_vals - other.counts_vals}\n kwargs['uncs'] = [np.nan]*len(self)\n warnings.warn('Subtraction of counts-based spectra, ' +\n 'livetimes have been ignored.', SpectrumWarning)\n except SpectrumError:\n raise SpectrumError(\n 'Subtraction of counts and CPS-based spectra without' +\n 'livetimes not possible')\n spect_obj = Spectrum(\n bin_edges_kev=self.bin_edges_kev, **kwargs)\n return spect_obj",
"def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"",
"def __isub__(self, other):\r\n if isinstance(other, vec4):\r\n self.x-=other.x\r\n self.y-=other.y\r\n self.z-=other.z\r\n self.w-=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for -=\"",
"def minus(self, a, b):\n return a - b",
"def __rsub__(self, other):\n\t\treturn (-self).__add__(float(other))",
"def __sub__(self, other):\n return self.subtract(other)",
"def __sub__(self, other):\n if isinstance(other, complex):\n return Power(self.power - other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power - other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n return Base40Pitch(self.base40 - other.base40)",
"def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})",
"def __sub__(self, other):\n return Difference(self, other)",
"def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new particles, since otherwise c = a - b changes a as well!\n p = particles(self)\n p.pos[:] = self.pos - other.pos\n p.vel[:] = self.vel - other.vel\n p.m = self.m\n p.q = self.q\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj",
"def __isub__(self, other):\n other_data = self._setup_numeric(other)\n self.data[:] = self.data - other_data\n\n return self",
"def __sub__(self, other):\n return MyTime(0,0,self.to_seconds() - other.to_seconds())",
"def __sub__(self, other: 'ModelParameters') -> 'ModelParameters':\n return ModelParameters([self[idx] - other[idx] for idx in range(len(self))])",
"def subtract(self, other):\n return self.add(other.neg())",
"def __sub__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value - other.value),\"\")",
"def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other",
"def __sub__(self, other):\n return self.__add__(other.__neg__())",
"def __rmul__(self, other):\n\n if isinstance(other, float):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = other * self.elec\n p.magn[:] = other * self.magn\n return p\n else:\n raise DataError(\"Type error: cannot multiply %s with %s\" % (type(other), type(self)))"
] | [
"0.68999416",
"0.6833628",
"0.676385",
"0.66355044",
"0.6598059",
"0.6590933",
"0.6540221",
"0.6534598",
"0.65079206",
"0.64034814",
"0.6343812",
"0.63341063",
"0.6326702",
"0.63187754",
"0.6307621",
"0.6303723",
"0.62694395",
"0.624874",
"0.6240412",
"0.6204857",
"0.62013614",
"0.6198369",
"0.61758894",
"0.6175523",
"0.6103616",
"0.610253",
"0.607393",
"0.60677236",
"0.6062413",
"0.60499203"
] | 0.84507024 | 0 |
Multiply two Amp() objects together, returning the product of the two objects. | def __mul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Amp(self.amps * other, self.amp_unit, self.freq, self.freq_unit)
if self.amp_unit != other.amp_unit:
raise ArithmeticError(f"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
prod_sum = self.amps * other.amps
return Amp(prod_sum, self.amp_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __mul__(self, other):\r\n return self.prod(other)",
"def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))",
"def mul(self, a, b):\n return a * b",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)",
"def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})",
"def __mul__(self, other):\n return sum(self._ar * other._ar)",
"def product(self, x, y):\n return self( x.lift() * y.lift() )",
"def multiply(self, other):\n from divisi2 import operators\n return operators.multiply(self, other)",
"def __mul__(self, other):\n\n return self._mul_div(other, div=False)",
"def _mul(self, other):\n return None",
"def multiply(self, a, b):\n return a * b",
"def _mul(self, other):\n if isinstance(other, SeqFormula):\n form1, v1 = self.formula, self.variables[0]\n form2, v2 = other.formula, other.variables[0]\n formula = form1 * form2.subs(v2, v1)\n start, stop = self._intersect_interval(other)\n return SeqFormula(formula, (v1, start, stop))",
"def mul(self, other):\n return self._new_rep(self.rep * other)",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def __call__(self, a, b):\n self.a = a\n self.b = b\n return a.data * b.data",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)",
"def multiplies(x, y):\n x[:] *= y[:]\n return x",
"def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))",
"def __mul__(self, A):\n pass",
"def __mul__(self, othertr):\n res = self.dot(othertr)\n return res",
"def _mul(a, b):\n return a * b",
"def __imul__(self, other):\n\n return self * other",
"def __mul__(self, other):\n return Trits(self.trits * other)",
"def __mul__(self, other): \n if isinstance(other, Iterable):\n # dot product\n return self.x * other[0] + self.y * other[1]\n else:\n # scalar product\n return Vector(self.x * other, self.y * other)",
"def __mul__(self,that):\n return self.__opExpand2(that, np.multiply)",
"def __mul__(self, other):\r\n\r\n T = type(other)\r\n # vec4*scalar\r\n if T==types.FloatType or T==types.IntType or T==types.LongType:\r\n return vec4(self.x*other, self.y*other, self.z*other, self.w*other)\r\n # vec4*vec4\r\n if isinstance(other, vec4):\r\n return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w\r\n # unsupported\r\n else:\r\n # Try to delegate the operation to the other operand\r\n if getattr(other,\"__rmul__\",None)!=None:\r\n return other.__rmul__(self)\r\n else:\r\n raise TypeError, \"unsupported operand type for *\"",
"def multiply(lhs, rhs):\n return _make.multiply(lhs, rhs)",
"def product(a, b):\n return a * b",
"def __mul__(self, other):\n if not isinstance(other, UniSet):\n other = self.fam.c_uniset(other)\n return self.fam.c_mul(self, other)"
] | [
"0.7408632",
"0.7406158",
"0.7264367",
"0.6947231",
"0.6929308",
"0.6916226",
"0.6911664",
"0.6869576",
"0.6840827",
"0.68196017",
"0.6803583",
"0.67914593",
"0.6761148",
"0.67590225",
"0.6757337",
"0.67506737",
"0.6744073",
"0.66633433",
"0.66592115",
"0.6612321",
"0.66109776",
"0.65944076",
"0.6577108",
"0.65725017",
"0.6564663",
"0.65421194",
"0.65330803",
"0.65288883",
"0.6528558",
"0.6525179"
] | 0.80016875 | 0 |
There are two modes. 1. Add a complex to an Ohm() object. Return a new Ohm() object with '.ohm' that is the sum of 'self.ohm' and the passed complex(). 2. Add two Ohm() objects together, returning a new Ohm() object with '.ohm' that is the sum of 'self.ohm' and 'other.ohm'. | def __add__(self, other):
if isinstance(other, complex):
return Ohm(self.ohm + other, self.ohm_unit, self.freq, self.freq_unit)
if self.ohm_unit != other.ohm_unit:
raise ArithmeticError(f"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
ohm_sum = self.ohm + other.ohm
return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm - other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm - other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary",
"def __add__(self, other):\n self.sum_complex_num = Complex((self.real + other.real), (self.imaginary + other.imaginary))\n return self.sum_complex_num",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm * other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm * other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)",
"def __radd__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r + self.r, other.i + self.i)",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm / other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm / other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf",
"def __add__(self, other):\n\n if not isinstance(other, Photons):\n raise ValueError('Can only add a Photons object to another Photons object.')\n\n # don't want to modify what is being added\n other = other.copy()\n\n # make column units consistent with self\n other.match_units(self)\n\n # add and /or update observation columns as necessary\n self.add_observations_column()\n other.add_observations_column()\n n_obs_self = len(self.obs_metadata)\n other['n'] += n_obs_self\n\n # re-reference times to the datum of self\n other.set_time_datum(self.time_datum)\n\n # stack the data tables\n photons = _tbl.vstack([self.photons, other.photons])\n\n # leave it to the user to deal with sorting and grouping and dealing with overlap as they see fit :)\n obs_metadata = self.obs_metadata + other.obs_metadata\n obs_times = list(self.obs_times) + list(other.obs_times)\n obs_bandpasses = list(self.obs_bandpasses) + list(other.obs_bandpasses)\n\n return Photons(photons=photons, obs_metadata=obs_metadata, time_datum=self.time_datum, obs_times=obs_times,\n obs_bandpasses=obs_bandpasses)",
"def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj",
"def __add__(self, other):\n assert isinstance(other, HStruct)\n return HStruct(*self.fields, *other.fields)",
"def __add__(self, oth):\n\t\tif not isinstance(oth, Matrix):\n\t\t\toth = Matrix(oth)\n\t\treturn self._add(oth)",
"def __add__(self, other):\n try:\n new_num = (self._num * other._den) + (self._den * other._num)\n new_den = (self._den * other._den)\n return Rational(new_num, new_den)\n except AttributeError:\n return (self + Rational.parse_number(other))",
"def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))",
"def __add__(self, other):\n return self.add(other)",
"def __add__(self, other):\r\n return self.add(other)",
"def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n self.__dict__.update(other)\n return self",
"def __add__(self, other):\n if other is None:\n return self\n\n return super().__add__(other)",
"def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def basic_add(mv1, mv2):\n obj = expand(mv1.obj + mv2.obj)\n return MV(obj)",
"def __add__(self, other):\n new_measure = Measure()\n settings = [\"raw\", \"fil\"]\n\n for rf in settings:\n new_measure.hit1[rf] = (self.hit1[rf] + other.hit1[rf])\n new_measure.hit3[rf] = (self.hit3[rf] + other.hit3[rf])\n new_measure.hit10[rf] = (self.hit10[rf] + other.hit10[rf])\n new_measure.mrr[rf] = (self.mrr[rf] + other.mrr[rf])\n new_measure.mr[rf] = (self.mr[rf] + other.mr[rf])\n return new_measure",
"def __iadd__(self, other):\n self.MergeWith(other)\n return self",
"def __add__(self, other):\n add_mortgage = Mortgage(self.principle + other.principle,\n self.interest_rate + other.interest_rate,\n self.year + self.year)\n return add_mortgage",
"def __add__(self, other):\n return add_mps(self, other)",
"def __radd__(self, oth):\n\t\toth_m = oth\n\t\tif not isinstance(oth_m, Matrix):\n\t\t\toth_m = Matrix(oth_m)\n\t\tres_m = oth_m._add(self)\n\t\tif isinstance(oth,Matrix):\n\t\t\treturn res_m\n\t\telse:\n\t\t\treturn type(oth)(res_m._unnest())",
"def __iadd__(self, other):\n self.center += other.center\n self.radius += other.radius\n return self",
"def __add__(self, other):\n base = deepcopy(self)\n base += other # (+=) == __iadd__\n return base"
] | [
"0.75681996",
"0.74703264",
"0.74501014",
"0.7414552",
"0.7378725",
"0.7324489",
"0.68723",
"0.67236125",
"0.6660199",
"0.6646521",
"0.6638953",
"0.6621129",
"0.65180033",
"0.6511642",
"0.6492798",
"0.64850414",
"0.64789504",
"0.6464822",
"0.64206195",
"0.64200747",
"0.64186496",
"0.63900846",
"0.6388631",
"0.63867366",
"0.6351484",
"0.6334701",
"0.63266516",
"0.62811047",
"0.62796414",
"0.6278682"
] | 0.89833313 | 0 |
There are two modes. 1. Subtract a complex from an Ohm() object. Return a new Ohm() object with '.ohm' that is the difference of 'self.ohm' and the passed complex(). 2. Subtract two Ohm() objects, returning a new Ohm() object with '.ohm' that is the difference of 'self.ohm' and 'other.ohm'. a) The frequencies (including the frequency units) of both objects must be equal. b) The ohm_unit of both objects must be equal. If any of these conditions is not met, an ArithmeticError exception will be raised. | def __sub__(self, other):
if isinstance(other, complex):
return Ohm(self.ohm - other, self.ohm_unit, self.freq, self.freq_unit)
if self.ohm_unit != other.ohm_unit:
raise ArithmeticError(f"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
ohm_sum = self.ohm - other.ohm
return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm / other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm / other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm + other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm + other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm * other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm * other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if isinstance(other, complex):\n return Power(self.power - other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power - other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary",
"def __rsub__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r - self.r, other.i - self.i)",
"def __sub__(self, other):\n try:\n new_num = (self._num * other._den) - (self._den * other._num)\n new_den = (self._den * other._den)\n return Rational(new_num, new_den)\n except AttributeError:\n return (self - Rational.parse_number(other))",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power / other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} and {other.power_unit} are not the \"\n f\"same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_factor = self.power / other.power\n return Power(power_factor, self.power_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if not self.unit.is_compatible(other.unit):\n raise TypeError('Cannot subtract two quantities with incompatible units \"%s\" and \"%s\".' % (self.unit, other.unit))\n value = self._value - other.value_in_unit(self.unit)\n unit = self.unit\n return Quantity(value, unit)",
"def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))",
"def complex_difference(c_1,c_2):\n return c_1 - c_2",
"def __sub__(self, other):\n top = self.num*other.denom - self.denom*other.num\n bott = self.denom*other.denom\n return Fraction(top, bott)",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self, other):\n top = self.num*other.denom - self.denom*other.num\n bott = self.denom*other.denom\n return fraction(top, bott)",
"def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def basic_sub(mv1, mv2):\n obj = expand(mv1.obj - mv2.obj)\n return MV(obj)",
"def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})",
"def __rdiv__(self, other):\n other = _to_complex(other)\n return self.inv().__mul__(other)",
"def __sub__(self, other):\n return Difference(self, other)",
"def __mul__(self,other):\n\t\treal = (self.realPart * other.realPart) - (self.imaginaryPart * other.imaginaryPart)\n\t\timaginary = (self.realPart*other.imaginaryPart) + (self.imaginaryPart * other.realPart)\n\n\t\t# create and return complexNumber\n\t\treturn real,imaginary",
"def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other",
"def __truediv__(self, other):\n try:\n assert not other.is_zero(), \"cannot divide by 0.\"\n new_num = (self._num * other._den)\n new_den = (self._den * other._num) \n return Rational(new_num, new_den)\n except AttributeError:\n return (self / Rational.parse_number(other))",
"def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj",
"def __eq__(self, other):\n return (self.real+(self.imag*1j)) == (other.real+(other.imag*1j))\n #return (Complex(self.real, self.imag) == Complex(other.real, other.imag))",
"def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary",
"def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})",
"def __sub__(self, other):\n # type: (object) -> Fraction\n dx = other\n if type(other) is float:\n dx = dectofr(other)\n return Fraction(self.numerator * dx.denominator - self.denominator * dx.numerator,\n self.denominator * dx.denominator)"
] | [
"0.7698359",
"0.76604456",
"0.74490535",
"0.733054",
"0.68389434",
"0.6429149",
"0.6335388",
"0.62942797",
"0.6225056",
"0.6210105",
"0.6170732",
"0.6101144",
"0.6083943",
"0.6012478",
"0.60102427",
"0.5950072",
"0.5921162",
"0.5875086",
"0.581623",
"0.58135456",
"0.5800836",
"0.58006907",
"0.5794987",
"0.57269716",
"0.57059544",
"0.56935656",
"0.5690583",
"0.56839246",
"0.56796426",
"0.56718755"
] | 0.90131366 | 0 |
Multiply two Ohm() objects together, returning the product of the two objects. | def __mul__(self, other):
if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):
return Ohm(self.ohm * other, self.ohm_unit, self.freq, self.freq_unit)
if self.ohm_unit != other.ohm_unit:
raise ArithmeticError(f"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
prod_sum = self.ohm * other.ohm
return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __mul__(self, y):\n sum = 0\n x = self\n if len(x) > len(y):\n x, y = y, x\n for key in x:\n if key not in y:\n continue\n sum += x[key] * y[key]\n return sum",
"def __mul__(self, y):\n sum = 0\n x = self\n if len(x) > len(y):\n x, y = y, x\n for key in x:\n if key not in y:\n continue\n sum += x[key] * y[key]\n return sum",
"def __mul__(self, other):\r\n return self.prod(other)",
"def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})",
"def multiply(self, other):\n from divisi2 import operators\n return operators.multiply(self, other)",
"def mul(self, a, b):\n return a * b",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def __mul__(self, other):\n\n return self._mul_div(other, div=False)",
"def __mul__(self,y): \n\n # BZO mulitplication\n if type(y)==type(self):\n Out = self._CreateSameType()\n \n for Ind1 in self.IndList():\n Obj1=self[Ind1]\n for Ind2 in y.IndList():\n Obj2=y[Ind2]\n \n Ind3 = tuple(add(Ind1,Ind2))\n \n Out[Ind3] += Obj1*Obj2\n \n # Scalar multiplicatin\n else:\n\n Out = self._CreateSameType()\n\n Out.SetLists(self.IndList(),[y*x for x in self.__ObjList])\n\n # Multiplication with item of its own type\n \n \n \n \n \n return Out",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)",
"def _mul(self, other):\n return None",
"def mul(self, other):\n return self._new_rep(self.rep * other)",
"def product(self, x, y):\n return self( x.lift() * y.lift() )",
"def product(self, x, y):\n return self._cached_product(x.value, y.value)",
"def multiply(self: T, other: T) -> T:",
"def basic_geometric_product(obj1, obj2):\n def mul_table(b1, b2):\n return MV.base_mul_table[(b1, b2)]\n\n obj12 = bilinear_product(obj1 * obj2, mul_table)\n\n return obj12",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts * other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts * other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)",
"def mul(x, y):\n return multiply(x, y)",
"def __mul__(self, other):\n return Trits(self.trits * other)",
"def multiply(self, a, b):\n return a * b",
"def _multiply(self, other):\n raise NotImplementedError(\n \"{} does not support scalar multiplication\".format(type(self)))",
"def _mul(a, b):\n return a * b",
"def __mul__(self, other):\n if is_unit(other):\n # print \"quantity * unit\"\n # Many other mul/div operations delegate to here because I was debugging\n # a dimensionless unit conversion problem, which I ended up fixing within\n # the reduce_unit() method.\n unit = self.unit * other\n return Quantity(self._value, unit).reduce_unit(self.unit)\n elif is_quantity(other):\n # print \"quantity * quantity\"\n # Situations where the units cancel can result in scale factors from the unit cancellation.\n # To simplify things, delegate Quantity * Quantity to (Quantity * scalar) * unit\n return (self * other._value) * other.unit\n else:\n # print \"quantity * scalar\"\n return self._change_units_with_factor(self.unit, other, post_multiply=False)",
"def multiply(self, other):\n return Rational(self.numerator * other.denominator, self.denominator * other.numerator)",
"def multiply(x, y):\n return x * y",
"def multiply(x, y):\n return x * y",
"def multiply(x, y):\n return x * y",
"def multiplies(x, y):\n x[:] *= y[:]\n return x",
"def __mul__(self, other):\n return sum(self._ar * other._ar)"
] | [
"0.7158825",
"0.7158825",
"0.6799878",
"0.67005163",
"0.6686018",
"0.66069025",
"0.6605818",
"0.6602995",
"0.6474987",
"0.6459986",
"0.64501494",
"0.64255404",
"0.637624",
"0.63341546",
"0.6328572",
"0.6253951",
"0.62519056",
"0.62491167",
"0.6203832",
"0.6196025",
"0.6124351",
"0.6082682",
"0.6081118",
"0.6061501",
"0.60303104",
"0.60216975",
"0.60216975",
"0.60216975",
"0.60197634",
"0.601941"
] | 0.77959746 | 0 |
Return ohm as a resistance (the real part of self._ohms). Reading it does not change the value of self._ohms. | def r(self) -> float:
return self._ohms.real | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getResistence(self):\n return self.resistence",
"def calculate_rh(self):\n # Check for existence of relative humidity and mixing ratio\n if self.data.get('Relative_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n # Convert mixing ratio to relative humidity\n sat_vapor = 6.11 * (10.0**((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] -\n sat_vapor))\n\n self.data['Relative_Humidity'] = ((self.data['Mixing_Ratio'] /\n sat_w) * 100.0)",
"def get_setResistance(self):\n self.read(\":RES?\")",
"def get_resistance(self, res_type='float'):\n if res_type == 'text':\n # Output to be a string\n # Transform value (in Ohm) as a int string\n val = str(int(self.amplifier.res))\n\n # Compute displayable unit of the value\n unit = (len(val) - 1) // 3\n length = len(val) - unit * 3\n if unit <= 0:\n unit = ' Ohm'\n elif unit == 1:\n unit = ' kOhm'\n elif unit == 2:\n unit = ' MOhm'\n elif unit == 3:\n unit = ' GOhm'\n elif unit == 4:\n unit = ' TOhm'\n else:\n unit = ' 1E{} Ohm'.format(unit * 3)\n\n # Change the unit of the value\n if len(val) < length + 3:\n text_value = val[:length] + '.' + val[length:] + unit\n else:\n text_value = val[:length] + '.' + val[length:length + 2] + unit\n\n return text_value\n\n elif res_type == 'float':\n # Output to be a float\n return self.amplifier.res",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD",
"def calc_impedance(self) -> (Ohm, None):\n power: ComType = complex(0)\n power_unit: str = ''\n amp: NumType = 0\n amp_unit: str = ''\n volt: NumType = 0\n volt_unit: str = ''\n\n if self._volt_exists and self._volt_exists:\n if hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n elif hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n if hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n elif hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n z = volt / amp\n z_unit: str = f'Ohms ({volt_unit}/{amp_unit})'\n\n elif self._amp_exists and self._power_exists:\n if hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n elif hasattr(self._obj1, 'power'):\n power, power_unit = self._obj1.power, self._obj1.power_unit\n if hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n elif hasattr(self._obj2, 'power'):\n power, power_unit = self._obj2.power, self._obj2.power_unit\n z = power / amp**2\n z_unit: str = f'Ohms ({power_unit}/{amp_unit}^2)'\n\n else:\n return None\n\n return Ohm(z, z_unit, self._obj1.frequency, self._obj1.freq_unit)",
"def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023",
"def input_resistance(self):\n return None",
"def set_resistance(self, value):\n self.write(\":RES {}OHM\".format(value))",
"def get_rms(self):\r\n return self.rms.copy()",
"def access_resistance(self):\n return None",
"def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def rond(self):\n return self._rond.get_waarde()",
"def rh(self, h):\n sez=self.getSect(h)\n area=self.area(sez)\n wetborder = self.wetBorder(sez)\n return area/wetborder",
"def get_rho(self):\n return self.rho",
"def RestEnergy(self):\n return (self.restMass * const.speed_of_light * const.speed_of_light)",
"def get_resistance(self):\n\t\tdata = bus.read_byte_data(AD5259_DEFAULT_ADDRESS, AD5259_WORD_ADDR_RDAC)\n\t\t\n\t\t# Convert the data\n\t\tresistance_wb = (data / 256.0) * 5.0\n\t\tresistance_wa = 5 - resistance_wb\n\t\t\n\t\treturn {'a' : resistance_wa, 'b' : resistance_wb}",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def get_corrected_resistance(self, temperature, humidity):\n return self.get_resistance()/ self.get_correction_factor(temperature, humidity)",
"def Om(self):\n return self._Om",
"def resamp(self):\n if self._resamp is None:\n # self._resamp = self.distributions.uniform(0., 1.) * units.deg\n # return self._resamp\n # first make resamp appropriate for low-e orbits.\n amp_max = (-403.632 + 9.09917 * self.phi0.to('deg').value - 0.0442498 *\n self.phi0.to('deg').value ** 2 - 0.0883975 / self.phi0.to('deg').value) * units.deg\n amp_max[self.e < 0.05] = 15 * units.deg\n amp_min = (79.031 * numpy.exp(-(self.phi0.to('deg').value - 121.3435) ** 2 / (2 * 15.51349 ** 2))) * units.deg\n amp_min[self.e < 0.05] = 0 * units.deg\n self._resamp = amp_max - self.distributions.linear(0.25, 1) * (amp_max - amp_min)\n self._resamp[self.e < 0.05] = 15 * units.deg\n return self._resamp",
"def res_h5(self):\n return self._res_h5",
"def get_on_resistance(self):\n is_nchannel = True\n stack = 4\n is_cell = False\n return self.tr_r_on(self.nmos_width, is_nchannel, stack, is_cell)",
"def a_realization(self):\n return self.UHP()",
"def anisotropy_solution(self, r, **kwargs):\n return self._model.anisotropy_solution(r, **kwargs)",
"def sorrow(self):\n return self.emotions.sorrow",
"def anisotropy_solution(r):\n return r**2",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))",
"def get_corrected_rzero(self, temperature, humidity):\n return self.get_corrected_resistance(temperature, humidity) * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))"
] | [
"0.58289057",
"0.5756095",
"0.55540806",
"0.5437796",
"0.54163355",
"0.53827655",
"0.5373473",
"0.5333576",
"0.5330446",
"0.5281449",
"0.5253052",
"0.5227909",
"0.5227909",
"0.5215746",
"0.5182154",
"0.5170751",
"0.5131431",
"0.5111294",
"0.5100755",
"0.5100755",
"0.5049968",
"0.5035712",
"0.5002115",
"0.49929172",
"0.49503747",
"0.49457914",
"0.49435788",
"0.4932997",
"0.49258187",
"0.49258187"
] | 0.59486413 | 0 |
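Editorial illustration (not part of the dataset row above): the record documents an accessor that reads the resistance out of a stored complex impedance. Below is a minimal, self-contained Python sketch of the same idea; the Impedance class name and the companion x property are assumptions made for this example only.

class Impedance:
    def __init__(self, ohms: complex) -> None:
        self._ohms = ohms  # stored as a complex value and never mutated here

    @property
    def r(self) -> float:
        # Resistance is the real part; reading it leaves self._ohms unchanged.
        return self._ohms.real

    @property
    def x(self) -> float:
        # Reactance is the imaginary part (companion property for illustration).
        return self._ohms.imag

z = Impedance(50 + 30j)
print(z.r, z.x)  # 50.0 30.0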
There are two modes. 1. Add a complex to a Power() object. Return a new Power() object with '.power' that is the sum of 'self.power' and the passed complex(). 2. Adding two Power() objects together, returning a new Power() object with '.power' that is the sum of 'self.power' and 'other.power'. | def __add__(self, other):
if isinstance(other, complex):
return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)
if self.power_unit != other.power_unit:
raise ArithmeticError(f"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
power_sum = self.power + other.power
return Power(power_sum, self.power_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm + other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm + other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n self.sum_complex_num = Complex((self.real + other.real), (self.imaginary + other.imaginary))\n return self.sum_complex_num",
"def __add__(self,other):\n\t\treal = self.realPart + other.realPart\n\t\timaginary = self.imaginaryPart + other.imaginaryPart\n\n\t\t#create and return new complexnumber\n\t\treturn real,imaginary",
"def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)",
"def __radd__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r + self.r, other.i + self.i)",
"def __sub__(self, other):\n if isinstance(other, complex):\n return Power(self.power - other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power - other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __mul__(self,other):\n\t\treal = (self.realPart * other.realPart) - (self.imaginaryPart * other.imaginaryPart)\n\t\timaginary = (self.realPart*other.imaginaryPart) + (self.imaginaryPart * other.realPart)\n\n\t\t# create and return complexNumber\n\t\treturn real,imaginary",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm * other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm * other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if other == 0:\n return self\n\n pmf = Pmf()\n for v1, p1 in self.items():\n for v2, p2 in other.items():\n pmf[v1 + v2] += p1 * p2\n return pmf",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n return add_mps(self, other)",
"def __pow__(self, other, **kwargs):\n kwargs.update({'operator': 'pow'})\n return self.__add__(other, **kwargs)",
"def __add__(self, other):\n base = deepcopy(self)\n base += other # (+=) == __iadd__\n return base",
"def __add__(self, other):\n try:\n new_num = (self._num * other._den) + (self._den * other._num)\n new_den = (self._den * other._den)\n return Rational(new_num, new_den)\n except AttributeError:\n return (self + Rational.parse_number(other))",
"def __add__(self, other)\n \n def __mul__(self, other):\n print(\"Standard multiplication with\", other.which)\n print(other.which)\n \n if isinstance(other, int):\n print(\"Other is int\")\n self.coefficients *= other\n \n return self\n \n else:\n print(\"Other operator is spin operator\")\n summandsNew = []\n coeffsNew = []\n for otherSummandIndex, otherSummand in enumerate(other.summands):\n for thisSummandIndex, thisSummand in enumerate(self.summands):\n summandsNew.append(flatten([thisSummand, otherSummand]))\n coeffsNew.append(self.coefficients[thisSummandIndex]*other.coefficients[otherSummandIndex])\n print(summandsNew) \n self.coeffs = coeffsNew\n self.summands = summandsNew\n \n return self",
"def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __add__(self, other):\n if (len(self.arg) < len(other.arg)):\n summ = Polynomial(other.arg)\n i = len(self.arg) - 1\n for x in self.arg:\n summ.arg[i] = self.arg[i] + summ.arg[i]\n i = i - 1\n else:\n summ = Polynomial(self.arg)\n i = len(other.arg) - 1\n for x in other.arg:\n summ.arg[i] = other.arg[i] + summ.arg[i]\n i = i - 1\n return summ",
"def __mul__(self, other):\n self.mul_complex_num = Complex((self.real * other.real - self.imaginary * other.imaginary),\n (self.real * other.imaginary + self.imaginary * other.real))\n return self.mul_complex_num",
"def __add__(self, other):\n return self.add(other)",
"def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))",
"def __add__(self, other):\r\n return self.add(other)",
"def __add__(self, other):\n\t\tif isinstance(other, Value):\n\t\t\treturn Value(self.val + other.val, sqrt(self.error**2 + other.error**2))\n\t\telse:\n\t\t\treturn Value(self.val + other, self.error)",
"def __add__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() + other",
"def __add__(self, other):\n new_numerator = self._numerator * other.denominator() + other.numerator() * self._denominator\n new_denominator = self._denominator * other.denominator()\n return Rational(new_numerator, new_denominator)",
"def __add__(self, polynomial_2: Polynomial) -> Polynomial:\n\n if self.degree > polynomial_2.degree:\n coefficients = self.coefficients[:]\n for i in range(polynomial_2.degree + 1):\n coefficients[i] += polynomial_2.coefficients[i]\n return Polynomial(self.degree, coefficients)\n else:\n coefficients = polynomial_2.coefficients[:]\n for i in range(self.degree + 1):\n coefficients[i] += self.coefficients[i]\n return Polynomial(polynomial_2.degree, coefficients)",
"def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)",
"def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})",
"def __radd__(self, other):\n return self + other",
"def __radd__(self, other):\n return self + other"
] | [
"0.8250026",
"0.7914886",
"0.785498",
"0.7705226",
"0.7285699",
"0.7202161",
"0.7138414",
"0.7040537",
"0.70316243",
"0.7004599",
"0.6935744",
"0.6896497",
"0.6888758",
"0.6868086",
"0.6868002",
"0.6853717",
"0.68070215",
"0.6806516",
"0.68060136",
"0.6790358",
"0.67831635",
"0.67530555",
"0.6722374",
"0.67107725",
"0.66961867",
"0.6669444",
"0.6665292",
"0.6662588",
"0.6656885",
"0.6656885"
] | 0.90089023 | 0 |
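Editorial illustration (not part of the dataset row above): the __add__ in this record guards on matching power units, frequency and frequency units before summing. The sketch below is self-contained and uses an assumed minimal Power class that mirrors the constructor signature shown in the record; the same guard structure also appears in the subtraction and multiplication records that follow.

class Power:
    def __init__(self, power: complex, power_unit: str, freq: float, freq_unit: str) -> None:
        self.power, self.power_unit = power, power_unit
        self.frequency, self.freq_unit = freq, freq_unit

    def __add__(self, other):
        if isinstance(other, complex):  # mode 1: Power + complex scalar
            return Power(self.power + other, self.power_unit, self.frequency, self.freq_unit)
        # mode 2: Power + Power, only when units and frequency agree
        if (self.power_unit, self.frequency, self.freq_unit) != (other.power_unit, other.frequency, other.freq_unit):
            raise ArithmeticError("power units or frequency do not match")
        return Power(self.power + other.power, self.power_unit, self.frequency, self.freq_unit)

total = Power(3 + 4j, "W", 50.0, "Hz") + Power(1 + 1j, "W", 50.0, "Hz")
print(total.power)  # (4+5j)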
There are two modes. 1. Subtract a complex from a Power() object. Return a new Power() object with '.power' that is the difference of 'self.power' and the passed complex(). 2. Subtract two Power() objects, returning a new Power() object with '.power' that is the difference of 'self.power' and 'other.power'. a) The frequencies (including the frequency units) of both objects must be equal. b) The power_unit of both objects must be equal. If any of these conditions are not met then an ArithmeticError exception will be raised. | def __sub__(self, other):
if isinstance(other, complex):
return Power(self.power - other, self.power_unit, self.freq, self.freq_unit)
if self.power_unit != other.power_unit:
raise ArithmeticError(f"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
power_sum = self.power - other.power
return Power(power_sum, self.power_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __sub__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm - other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm - other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __add__(self, other):\n if isinstance(other, complex):\n return Power(self.power + other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.power_unit} and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_sum = self.power + other.power\n return Power(power_sum, self.power_unit, self.freq, self.freq_unit)",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power / other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} and {other.power_unit} are not the \"\n f\"same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_factor = self.power / other.power\n return Power(power_factor, self.power_unit, self.freq, self.freq_unit)",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)\n if self.power_unit != other.power_unit:\n raise ArithmeticError(f\"The objects' power units {self.power_unit} \"\n f\"and {other.power_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n power_prod = self.power * other.power\n return Power(power_prod, self.power_unit, self.freq, self.freq_unit)",
"def __sub__(self,other):\n\t\treal = self.realPart - other.realPart\n\t\timaginary = self.imaginaryPart - other.imaginaryPart\n\n\t\t#create and return complexNumber\n\t\treturn real,imaginary",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm * other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm * other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):\n return Ohm(self.ohm / other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.ohm / other.ohm\n return Ohm(prod_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)",
"def __mul__(self,other):\n\t\treal = (self.realPart * other.realPart) - (self.imaginaryPart * other.imaginaryPart)\n\t\timaginary = (self.realPart*other.imaginaryPart) + (self.imaginaryPart * other.realPart)\n\n\t\t# create and return complexNumber\n\t\treturn real,imaginary",
"def __rpow__(self, other):\n\n return self.__pow__(other)",
"def __add__(self, other):\n if isinstance(other, complex):\n return Ohm(self.ohm + other, self.ohm_unit, self.freq, self.freq_unit)\n if self.ohm_unit != other.ohm_unit:\n raise ArithmeticError(f\"The objects' ohm units {self.ohm_unit} and {other.ohm_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n ohm_sum = self.ohm + other.ohm\n return Ohm(ohm_sum, self.ohm_unit, self.freq, self.freq_unit)",
"def __rsub__(self, other):\n other = _to_complex(other)\n return ComplexVal(other.r - self.r, other.i - self.i)",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def __sub__(self, other):\n try:\n new_num = (self._num * other._den) - (self._den * other._num)\n new_den = (self._den * other._den)\n return Rational(new_num, new_den)\n except AttributeError:\n return (self - Rational.parse_number(other))",
"def calc_power(self) -> (Power, None):\n amp: NumType = 0\n amp_unit: str = ''\n volt: NumType = 0\n volt_unit: str = ''\n ohm: ComType = complex(0)\n ohm_unit: str = ''\n\n if self._amp_exists and self._volt_exists:\n if hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n pwr = amp * volt\n pwr_unit: str = f'{volt_unit}*{amp_unit}'\n\n elif self._amp_exists and self._ohm_exists:\n if hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n elif hasattr(self._obj1, 'ohm'):\n ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit\n if hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n elif hasattr(self._obj2, 'ohm'):\n ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit\n pwr = amp**2 * ohm\n pwr_unit: str = f'{amp_unit}^2*{ohm_unit}'\n\n elif self._volt_exists and self._ohm_exists:\n if hasattr(self._obj1, 'ohm'):\n ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'ohm'):\n ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n pwr = volt**2 / ohm\n pwr_unit: str = f'{volt_unit}^2/{ohm_unit}'\n\n else:\n return None\n\n return Power(pwr, pwr_unit, self._obj1.frequency, self._obj1.freq_unit)",
"def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )",
"def __rpow__(self, other):\n pass # TODO: implement this.",
"def __rdiv__(self, other):\n other = _to_complex(other)\n return self.inv().__mul__(other)",
"def __rmul__(self, other):\n\n if isinstance(other, float):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = other * self.elec\n p.magn[:] = other * self.magn\n return p\n else:\n raise DataError(\"Type error: cannot multiply %s with %s\" % (type(other), type(self)))",
"def __rpow__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Power(other, self)",
"def __mul__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum * other, self._imNum * other)\n\n if isinstance(other, complex):\n a = self._reNum * other.real\n b = self._reNum * other.imag\n c = self._imNum * other.real\n d = self._imNum * other.imag\n return Complex(a - d, c + b)\n\n a = self._reNum * other._reNum\n b = self._reNum * other._imNum\n c = self._imNum * other._reNum\n d = self._imNum * other._imNum\n return Complex(a - d, c + b)",
"def complex_difference(c_1,c_2):\n return c_1 - c_2",
"def __pow__(self, other):\n return MyCustomNumber(self.value ** other.value)",
"def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))",
"def __sub__(self, other):\n top = self.num*other.denom - self.denom*other.num\n bott = self.denom*other.denom\n return Fraction(top, bott)",
"def __sub__(self, other):\n top = self.num*other.denom - self.denom*other.num\n bott = self.denom*other.denom\n return fraction(top, bott)",
"def __mul__(self, other):\n self.mul_complex_num = Complex((self.real * other.real - self.imaginary * other.imaginary),\n (self.real * other.imaginary + self.imaginary * other.real))\n return self.mul_complex_num",
"def __sub__(self, other):\n if not self.unit.is_compatible(other.unit):\n raise TypeError('Cannot subtract two quantities with incompatible units \"%s\" and \"%s\".' % (self.unit, other.unit))\n value = self._value - other.value_in_unit(self.unit)\n unit = self.unit\n return Quantity(value, unit)",
"def __pow__(self, other, **kwargs):\n kwargs.update({'operator': 'pow'})\n return self.__add__(other, **kwargs)",
"def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other"
] | [
"0.7868433",
"0.73533434",
"0.73213",
"0.7271727",
"0.7052522",
"0.6915829",
"0.66105074",
"0.65981007",
"0.65697485",
"0.64671576",
"0.64670223",
"0.6457807",
"0.6430139",
"0.6407553",
"0.64031404",
"0.63710004",
"0.6362891",
"0.6256372",
"0.62556434",
"0.62384874",
"0.6204697",
"0.6197671",
"0.6192607",
"0.61807775",
"0.6172371",
"0.61637706",
"0.61478376",
"0.6126462",
"0.61229074",
"0.6063025"
] | 0.8721685 | 0 |
Multiply a Power() object by a scalar (int, float or complex) or by another Power() object, returning a new Power() object that is the product of the two. | def __mul__(self, other):
if isinstance(other, int) or isinstance(other, float) or isinstance(other, complex):
return Power(self.power * other, self.power_unit, self.freq, self.freq_unit)
if self.power_unit != other.power_unit:
raise ArithmeticError(f"The objects' power units {self.power_unit} "
f"and {other.power_unit} are not the same.")
if self.freq != other.frequency:
raise ArithmeticError(f"The objects' frequency {self.freq} and {other.frequency} are not the same.")
if self.freq_unit != other.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self.freq_unit} and {other.freq_unit} "
f"are not the same.")
power_prod = self.power * other.power
return Power(power_prod, self.power_unit, self.freq, self.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def product(self, x, y):\n return self( x.lift() * y.lift() )",
"def __mul__(self, other):\r\n return self.prod(other)",
"def mul_power(a,b):\r\n if (base(a)==base(b)):\r\n return make_power(base(a), power(a)+power(b))\r\n else:\r\n return calc_power(a)*calc_power(b)",
"def mul(self, a, b):\n return a * b",
"def product(self, x, y):\n return self._cached_product(x.value, y.value)",
"def mul(self, other):\n return self._new_rep(self.rep * other)",
"def pow(self, a, b):\n return a ** b",
"def mul(self, a: 'PFElement', b: 'PFElement') -> 'PFElement':\n return self(self._pf_mul(a.value, b.value, self.multiplicative_group))",
"def product(a, b):\n return a * b",
"def mul(self, other):\n\n return self._get(\"mul\", other, self.__class__)",
"def __pow__(self, other, tensor=False):\r\n return self.prod(other, tensor=True)",
"def multiply(self, a, b):\n return a * b",
"def __mul__(self, other):\n try:\n total = {self.var: other.val, other.var: self.val}\n return AutoDiffReverse(self.val * other.val, None, total)\n except AttributeError:\n return AutoDiffReverse(self.val * other, None, {self.var: other})",
"def mul(x, y):\n return multiply(x, y)",
"def power(x, y):\n return x ** y",
"def __pow__(self, other):\n return self._multiplicative_func(float.__pow__, self, other)",
"def multiply(lhs, rhs):\n return _make.multiply(lhs, rhs)",
"def __mul__(self, other, **kwargs):\n kwargs.update({'operator': 'mul'})\n return self.__add__(other, **kwargs)",
"def __mul__(self, other):\n\n return self._mul_div(other, div=False)",
"def multiply(self, other):\n from divisi2 import operators\n return operators.multiply(self, other)",
"def _mul(a, b):\n return a * b",
"def __pow__(self, other):\n return MyCustomNumber(self.value ** other.value)",
"def multiply(num1, num2):\n product = num1 * num2\n return product",
"def multiply(num1, num2):\n product = num1 * num2\n return product",
"def _mul(self, other):\n return None",
"def multiplies(x, y):\n x[:] *= y[:]\n return x",
"def __mul__(self, other):\n\n return self._binary_elementwise_op(other, np.multiply)",
"def mul(a, b):\n c = Calculator()\n result = c.mul(a, b)\n click.echo('{} * {} = {}'.format(a, b, result))",
"def mul(a: Decimal, b: Decimal) -> Decimal:\n return a * b",
"def multiplication(num1, num2):\n product = num1 * num2\n return product"
] | [
"0.77632946",
"0.7752715",
"0.7435338",
"0.7435006",
"0.72454077",
"0.71805924",
"0.7164024",
"0.71510655",
"0.7143583",
"0.71129245",
"0.7090096",
"0.7073022",
"0.7042423",
"0.7029677",
"0.7001191",
"0.6972775",
"0.6965795",
"0.695625",
"0.6954136",
"0.6921609",
"0.68991876",
"0.68985623",
"0.6896681",
"0.6896681",
"0.6857319",
"0.68255156",
"0.68211144",
"0.6820515",
"0.68035954",
"0.67815226"
] | 0.8015777 | 0 |
Compare the frequency attributes of the two objects and ensure that they are equal to allow the calculation to take place. Set the 'self._freq_equal' to True if the same. | def compare_freq(self):
if self._obj1.frequency != self._obj2.frequency:
raise ArithmeticError(f"The objects' frequency {self._obj1.frequency} and {self._obj2.frequency} are not the same.")
if self._obj1.freq_unit != self._obj2.freq_unit:
raise ArithmeticError(f"The objects' frequency units {self._obj1.freq_unit} and {self._obj2.freq_unit} "
f"are not the same.")
self._freq_equal = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n return np.array_equal(\n self.np_floats(),\n other.np_floats()) and np.array_equal(\n self.np_ints(),\n other.np_ints()) and np.array_equal(\n self.freqs,\n other.freqs)",
"def __gt__(self, other):\n if not isinstance(other, HuffNode):\n raise TypeError('not an instance of HuffNode')\n\n return self.freq > other.freq",
"def __eq__(self,f2):\n return self.__num * f2.den == self.__den * f2.num",
"def isSubset(self, other):\n for val, freq in self.items():\n if freq > other.freq(val):\n return False\n return True",
"def __eq__(self, other):\n if type(other) is not type(self):\n return False\n if self._sample_rate != other._sample_rate:\n return False\n if self._samples.shape != other._samples.shape:\n return False\n if np.any(self.samples != other._samples):\n return False\n return True",
"def __eq__(self, other):\n # check equality of names and attributes as well as that of the incident Node objects\n return \\\n self.weight == other.get_weight() and \\\n self.attributes.__eq__(other.get_attributes()) and \\\n self.get_incident_nodes().__eq__(other.get_incident_nodes())",
"def __eq__(self, other):\n if not isinstance(other, PeriodRate):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, EncodingStatistics):\n return False\n\n return self.__dict__ == other.__dict__",
"def same_frequency(num1, num2):\n return req_counter(str(num1)) == req_counter(str(num2))",
"def __eq__(self, other):\n\n return self._comparator.compare_measurements(self, other) == 0",
"def __eq__(self, other):\n if set(self.comp) != set(other.comp):\n return False\n if abs(self.energy - other.energy) > 1e-6:\n return False\n for key in self.comp:\n if abs(self.unit_comp[key] - other.unit_comp[key]) > 1e-6:\n return False\n return True",
"def equals(self, other):\n\n\t\tif not isinstance(other, DiscreteDistribution):\n\t\t\treturn False\n\n\t\tif set(self.keys()) != set(other.keys()):\n\t\t\treturn False\n\n\t\tfor key in self.keys():\n\t\t\tself_prob = round(self.log_probability(key), 12)\n\t\t\tother_prob = round(other.log_probability(key), 12)\n\t\t\tif self_prob != other_prob:\n\t\t\t\treturn False\n\n\t\treturn True",
"def __eq__(self, other):\n if not isinstance(other, Metric):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n return self.items() == other.items()",
"def __eq__(self, other):\n return np.all([\n self.__getattribute__(name) == other.__getattribute__(name)\n for name in self._fields\n ])",
"def same_frequency(num1, num2):\n freqlist1 = list(str(num1))\n freqlist2 = list(str(num2))\n setfreq1 = set(freqlist1)\n setfreq2 = set(freqlist2)\n setdict1 = {}\n setdict2 = {}\n for digit in setfreq1:\n setdict1[digit] = freqlist1.count(digit)\n for digit in setfreq2:\n setdict2[digit] = freqlist2.count(digit)\n return setdict1 == setdict2",
"def __eq__(self, other):\n if not isinstance(other, OneOfFluidResultControlsFieldCalculations):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n for (ngram, value) in self.items():\n if ngram not in other or other[ngram] != value:\n return False\n for (ngram, value) in other.items():\n if ngram not in self or self[ngram] != value:\n return False\n return True",
"def __ge__(self, other: Schema) -> bool:\n return set(self.items()) >= set(other.items())",
"def __eq__(self, other):\n return self.times == other.times",
"def __eq__(self, other):\n return isinstance(other, Bag) and Counter(self.items) == Counter(other.items)",
"def check_allele_freq_diff(self):\r\n \r\n if not self.old or not self.new:\r\n self.freq_diff = \"NA\"\r\n self.pvalue = \"NA\"\r\n \r\n else:\r\n old_frq = self.old.alt_percent\r\n new_frq = self.new.alt_percent\r\n \r\n if old_frq == 0 or new_frq == 0:\r\n self.freq_diff = \"NA\"\r\n \r\n else:\r\n self.freq_diff = abs(old_frq-new_frq)",
"def __eq__(self, other):\n if not isinstance(other, EmailPerformanceCustomerHistogramPeriod):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n try:\n if self.__title__ != other.getTitle():\n return False\n\n if self.__y_units__ != other.getYUnits():\n return False\n\n if self.__y_label__ != other.getYLabel():\n return False\n\n if self.__data_set_type__ != other.getDataSetType():\n return False\n\n if len(self.__axis_labels__) != other.getDimension():\n return False\n\n if self.__axis_labels__ != other.getAllAxisLabels():\n return False\n\n if self.__axis_units__ != other.getAllAxisUnits():\n return False\n\n if self.attr_list != other.attr_list:\n return False\n\n except:\n return False\n\n return True",
"def __eq__(self, other):\n if type(self) != type(other):\n return False\n s_vars = vars(self)\n o_vars = vars(other)\n for v in vars(self):\n if s_vars[v] != o_vars[v]:\n print(\"unequal property {0}\\n\".format(v))\n if v.endswith(\"last_count_update_time\"):\n print(\"self: {0}\\n\".format(s_vars[v]))\n print(\"othr: {0}\\n\".format(o_vars[v]))\n return False\n return True",
"def compare(self, other):\n return len(self & other) / max(len(self | other), 1)",
"def __eq__(self, other):\n if not isinstance(other, Rate):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n if not isinstance(other, RuleSchemaFormulaPredict):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n if not isinstance(other, Quota):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return (self.name == other.name) and (self.wavelength_control == other.wavelength_control) \\\n and (self.gonio_angles == other.gonio_angles) and (self.wl_angles == other.wl_angles) \\\n and (self.wavelength_minimum == other.wavelength_minimum) \\\n and (self.wavelength_maximum == other.wavelength_maximum) \\\n and (self.wavelength_bandwidth == other.wavelength_bandwidth)"
] | [
"0.6924577",
"0.6023659",
"0.60017574",
"0.5967577",
"0.5945457",
"0.5889283",
"0.58852327",
"0.5857143",
"0.5848025",
"0.5846018",
"0.58393663",
"0.5835524",
"0.58251196",
"0.58249813",
"0.58152753",
"0.5802302",
"0.57974994",
"0.57899815",
"0.5783015",
"0.57757765",
"0.5775526",
"0.5768149",
"0.5755136",
"0.5727904",
"0.5725525",
"0.57170105",
"0.57011217",
"0.5677714",
"0.5670917",
"0.5666998"
] | 0.8551229 | 0 |
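Editorial illustration (not part of the dataset row above): the compare_freq record checks that both the numeric frequency and its unit agree before any combined calculation is attempted. A standalone sketch of that guard, with assumed attribute names matching the record:

from types import SimpleNamespace

def frequencies_match(a, b) -> bool:
    # Both the frequency value and its unit must agree before combining quantities.
    return (a.frequency, a.freq_unit) == (b.frequency, b.freq_unit)

x = SimpleNamespace(frequency=50.0, freq_unit="Hz")
y = SimpleNamespace(frequency=60.0, freq_unit="Hz")
print(frequencies_match(x, y))  # False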
Calculate voltage using the two passed objects. Based on the two objects the method will determine the correct equation to use. The method will return None if the objects cannot be used for the called method. | def calc_voltage(self) -> (Volt, None):
power: ComType = complex(0)
power_unit: str = ''
amp: NumType = 0
amp_unit: str = ''
ohm: ComType = complex(0)
ohm_unit: str = ''
if self._amp_exists and self._ohm_exists:
if hasattr(self._obj1, 'amps'):
amp, amp_unit = self._obj1.amps, self._obj1.amp_unit
elif hasattr(self._obj1, 'ohm'):
ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit
if hasattr(self._obj2, 'amps'):
amp, amp_unit = self._obj2.amps, self._obj2.amp_unit
elif hasattr(self._obj2, 'ohm'):
ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit
volt = amp * ohm
volt_unit: str = f'Volts ({amp_unit}*{ohm_unit})'
elif self._amp_exists and self._power_exists:
if hasattr(self._obj1, 'amps'):
amp, amp_unit = self._obj1.amps, self._obj1.amp_unit
elif hasattr(self._obj1, 'power'):
power, power_unit = self._obj1.power, self._obj1.power_unit
if hasattr(self._obj2, 'amps'):
amp, amp_unit = self._obj2.amps, self._obj2.amp_unit
elif hasattr(self._obj2, 'power'):
power, power_unit = self._obj2.power, self._obj2.power_unit
volt = power / amp
volt_unit: str = f'Volts ({power_unit}/{amp_unit})'
else:
return None
return Volt(volt, volt_unit, self._obj1.frequency, self._obj1.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inductive_voltdiv(Vin=None, Vout=None, L1=None, L2=None, find=''):\n if Vin is not None and L1 is not None and L2 is not None:\n Vout = (Vin * L1) / (L1 + L2)\n elif Vout is not None and L1 is not None and L2 is not None:\n Vin = (Vout) * (L1 + L2) / (L1)\n elif Vin is not None and Vout is not None and L2 is not None:\n L1 = L2 * (Vin - Vout) / (Vout)\n elif Vin is not None and Vout is not None and L1 is not None:\n L2 = L1 * Vout / (Vin - Vout)\n else:\n raise ValueError(\"ERROR: Invalid Parameters or too few\" +\n \" parameters given to calculate.\")\n\n find = find.lower()\n\n if find == 'vin':\n return Vin\n elif find == 'vout':\n return Vout\n elif find == 'l1':\n return L1\n elif find == 'l2':\n return L2\n else:\n return Vin, Vout, L1, L2",
"def vd(v2,v1):\n return v2-v1",
"def eval(self, Vobj):\n try:\n return Vobj.evaluated_on(self)\n except AttributeError:\n return self.A() * Vobj + self.b()",
"def eval(self, Vobj):\n if is_Vector(Vobj):\n return self.A() * Vobj + self.b()\n return Vobj.evaluated_on(self)",
"def get_voltage_and_current(self):\n return self.voltage_and_current",
"def evaluate(self, *args, **kwargs):\n return self.constant_velocity",
"def get_voltage(self):\n self._raise_not_implemented()",
"def get_voltage(self, i_sup, t, *args, **kwargs):\r\n raise NotImplementedError",
"def __rtruediv__(self, other):\n value = -1 / (self.val * self.val)\n total = {self.var: other * value}\n return AutoDiffReverse(other / self.val, None, total)",
"def velocity(df0, df1):\n velocity = df1 - df0\n return velocity",
"def get_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. (.*?) .*? .*? .*? .*? . .*? .*? . . . .*?'\n voltage = float(re.findall(pattern,summary).pop())\n return voltage",
"def __truediv__(self, other):\n try:\n value = -1 / (other.val * other.val)\n total = {self.var: 1 / other.val, other.var: value * self.val}\n return AutoDiffReverse(self.val / other.val, None, total)\n except AttributeError:\n total = {self.var: 1 / other}\n return AutoDiffReverse(self.val / other, None, total)",
"def get_voltage(self):\n print(\"voici le voltage de la batterie\")",
"def vel(self, *args, **kwargs) -> Any:\n pass",
"def builtin_voltage(\n donor_conc: float, # donor concentration\n acceptor_conc: float, # acceptor concentration\n intrinsic_conc: float, # intrinsic concentration\n) -> float:\n\n if donor_conc <= 0:\n raise ValueError(\"Donor concentration should be positive\")\n elif acceptor_conc <= 0:\n raise ValueError(\"Acceptor concentration should be positive\")\n elif intrinsic_conc <= 0:\n raise ValueError(\"Intrinsic concentration should be positive\")\n elif donor_conc <= intrinsic_conc:\n raise ValueError(\n \"Donor concentration should be greater than intrinsic concentration\"\n )\n elif acceptor_conc <= intrinsic_conc:\n raise ValueError(\n \"Acceptor concentration should be greater than intrinsic concentration\"\n )\n else:\n return (\n Boltzmann\n * T\n * log((donor_conc * acceptor_conc) / intrinsic_conc**2)\n / physical_constants[\"electron volt\"][0]\n )",
"def __mul__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts * other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts * other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)",
"def calculate_volume(self, s1, s2, refvol):\n s1, s2, refvol = float(s1), float(s2), float(refvol)\n return (refvol * s2 / s1) / (1 - s2 / s1)",
"def calc_current(self) -> (Amp, None):\n power: ComType = complex(0)\n power_unit: str = ''\n volt: NumType = 0\n volt_unit: str = ''\n ohm: ComType = complex(0)\n ohm_unit: str = ''\n if self._volt_exists and self._power_exists:\n if hasattr(self._obj1, 'power'):\n power, power_unit = self._obj1.power, self._obj1.power_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'power'):\n power, power_unit = self._obj2.power, self._obj2.power_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n current = power / volt\n current_unit: str = f'Amps ({power_unit}/{volt_unit})'\n\n elif self._volt_exists and self._ohm_exists:\n if hasattr(self._obj1, 'ohm'):\n ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'ohm'):\n ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n current = volt / ohm\n current_unit: str = f'Amps ({volt_unit}/{ohm_unit})'\n\n else:\n return None\n\n return Amp(current, current_unit, self._obj1.frequency, self._obj1.freq_unit)",
"def __truediv__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts / other, self.volt_unit, self.freq, self.freq_unit)\n else:\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the\"\n f\" same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n prod_sum = self.volts / other.volts\n return Volt(prod_sum, self.volt_unit, self.freq, self.freq_unit)",
"def pv(self, other):\n\n assert self.n == other.n == 3, \"Produto vetorial definido somente em R3\"\n\n u, v = self, other\n\n return Vetor([u[1] * v[2] - u[2] * v[1],\n u[2] * v[0] - u[0] * v[2],\n u[0] * v[1] - u[1] * v[0]])",
"def reference_voltage(self) -> float:\n return self._ref_voltage",
"def obj(k_next) : \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec",
"def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)",
"def request_voltage_and_current(self):\n self.voltage_and_current = self.current_sensors.get_channel_voltage_and_current(self.channel)\n return self.voltage_and_current",
"def __call__ ( self , x , *args ) :\n #\n ## 1) evaluate the function \n val = self.func_eval ( x , *args )\n #\n ## no uncertainties? \n if isinstance ( x , num_types ) : return VE ( val , 0 )\n # ignore small or invalid uncertanties \n elif 0 >= x.cov2() or iszero ( x.cov2() ) : return VE ( val , 0 )\n #\n ## 2) evaluate the derivative\n dfun = self.__derivative\n d = dfun ( float ( x ) , *args ) \n ## 3) calculate the variance \n cov2 = d * d * x.cov2()\n ## 4) get a final result \n return VE ( val , cov2 )",
"def calc_out_voltage(self, input_photocurrent_file):\n pass",
"def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second",
"def __rtruediv__(self, other):\r\n return other * self.reciprocal()",
"def voltage(self):\n return self.outputValue()",
"def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el"
] | [
"0.6214673",
"0.614571",
"0.6009014",
"0.5904963",
"0.5863507",
"0.5747999",
"0.5747329",
"0.56916034",
"0.567709",
"0.5654466",
"0.55869013",
"0.5584726",
"0.55484796",
"0.5530032",
"0.5502495",
"0.5500371",
"0.54994154",
"0.5498538",
"0.5459045",
"0.54423773",
"0.543532",
"0.543242",
"0.5428562",
"0.5408133",
"0.53809595",
"0.5366892",
"0.5364277",
"0.5356922",
"0.5355546",
"0.5334116"
] | 0.675314 | 0 |
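Editorial illustration (not part of the dataset row above): the calc_voltage record applies V = I * Z (or V = P / I) once it has identified which operand carries which quantity. Below is a worked, self-contained example of the first branch using plain Python numbers instead of the dataset's Amp/Ohm/Volt classes:

current_a = 2.0            # 2 A through the load (assumed value)
impedance_ohm = 10 + 5j    # 10 ohm resistive, 5 ohm reactive (assumed value)
voltage_v = current_a * impedance_ohm
print(voltage_v)           # (20+10j) volts as a complex phasor
print(abs(voltage_v))      # about 22.36 V magnitude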
Calculate impedance using the two passed objects. Based on the two objects the method will determine the correct equation to use. The method will return None if the objects cannot be used for the called method. | def calc_impedance(self) -> (Ohm, None):
power: ComType = complex(0)
power_unit: str = ''
amp: NumType = 0
amp_unit: str = ''
volt: NumType = 0
volt_unit: str = ''
if self._volt_exists and self._amp_exists:
if hasattr(self._obj1, 'volts'):
volt, volt_unit = self._obj1.volts, self._obj1.volt_unit
elif hasattr(self._obj1, 'amps'):
amp, amp_unit = self._obj1.amps, self._obj1.amp_unit
if hasattr(self._obj2, 'volts'):
volt, volt_unit = self._obj2.volts, self._obj2.volt_unit
elif hasattr(self._obj2, 'amps'):
amp, amp_unit = self._obj2.amps, self._obj2.amp_unit
z = volt / amp
z_unit: str = f'Ohms ({volt_unit}/{amp_unit})'
elif self._amp_exists and self._power_exists:
if hasattr(self._obj1, 'amps'):
amp, amp_unit = self._obj1.amps, self._obj1.amp_unit
elif hasattr(self._obj1, 'power'):
power, power_unit = self._obj1.power, self._obj1.power_unit
if hasattr(self._obj2, 'amps'):
amp, amp_unit = self._obj2.amps, self._obj2.amp_unit
elif hasattr(self._obj2, 'power'):
power, power_unit = self._obj2.power, self._obj2.power_unit
z = power / amp**2
z_unit: str = f'Ohms ({power_unit}/{amp_unit}^2)'
else:
return None
return Ohm(z, z_unit, self._obj1.frequency, self._obj1.freq_unit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def di(o1, o2):\n return o1/o2",
"def calc(operand_1, operand_2):\n try:\n return operand_1/operand_2\n except ZeroDivisionError:\n return 0",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def p_obj1_given_obj2(self, obj1, obj2):\n if not obj2 in self.sums:\n raise ValueError(\"Object \" + `obj2` + \" not in self.sums.\")\n\n if self.sums[obj2] != 0:\n return self.prior[obj2][obj1] / float(self.sums[obj2])\n else:\n return self.prior[obj2][obj1] / (float(self.sums[obj2])+1.0)",
"def calc(operand_1, operand_2):\n\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1 / operand_2",
"def calculate(self) -> float:",
"def _r_at_interface(self, polarization, n_1, n_2):\n if polarization == 's':\n return ((n_1-n_2)/(n_1+n_2))\n elif polarization == 'p':\n return ((n_1-n_2)/(n_1+n_2))\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")",
"def __mul__(self, other) -> object:\n common_numerator = self.numerator_a * other.numerator_a\n common_denominator = self.denominator_b * other.denominator_b\n\n # check for common divisor\n common_divisor = self.common_divisor(int(common_numerator), common_denominator)\n if common_divisor is None:\n res = Fraction(common_numerator, common_denominator)\n else:\n common_numerator = common_numerator / common_divisor\n common_denominator = common_denominator / common_divisor\n res = Fraction(common_numerator, int(common_denominator))\n return res",
"def calc_power(self) -> (Power, None):\n amp: NumType = 0\n amp_unit: str = ''\n volt: NumType = 0\n volt_unit: str = ''\n ohm: ComType = complex(0)\n ohm_unit: str = ''\n\n if self._amp_exists and self._volt_exists:\n if hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n pwr = amp * volt\n pwr_unit: str = f'{volt_unit}*{amp_unit}'\n\n elif self._amp_exists and self._ohm_exists:\n if hasattr(self._obj1, 'amps'):\n amp, amp_unit = self._obj1.amps, self._obj1.amp_unit\n elif hasattr(self._obj1, 'ohm'):\n ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit\n if hasattr(self._obj2, 'amps'):\n amp, amp_unit = self._obj2.amps, self._obj2.amp_unit\n elif hasattr(self._obj2, 'ohm'):\n ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit\n pwr = amp**2 * ohm\n pwr_unit: str = f'{amp_unit}^2*{ohm_unit}'\n\n elif self._volt_exists and self._ohm_exists:\n if hasattr(self._obj1, 'ohm'):\n ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'ohm'):\n ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n pwr = volt**2 / ohm\n pwr_unit: str = f'{volt_unit}^2/{ohm_unit}'\n\n else:\n return None\n\n return Power(pwr, pwr_unit, self._obj1.frequency, self._obj1.freq_unit)",
"def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint",
"def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids] = self.fluid_func()\n k += self.num_nw_fluids\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k] = self.mass_flow_func()\n k += 1\n\n ######################################################################\n # equations for specified heta transfer\n if self.Q.is_set:\n self.residual[k] = self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio\n if self.pr.is_set:\n self.residual[k] = (\n self.inl[0].p.val_SI * self.pr.val - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta\n if self.zeta.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(zeta='zeta')\n k += 1\n\n ######################################################################\n # equation for specified hydro-group paremeters\n if self.hydro_group.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n # hazen williams equation\n if self.hydro_group.method == 'HW':\n func = self.hw_func\n # darcy friction factor\n else:\n func = self.darcy_func\n self.residual[k] = func()\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)",
"def autohard(equation):\n\n try:\n # Try to set a variable to an integer\n num1 = int(equation.split(\" \")[1])\n\n except ValueError:\n # Try to set a variable to a decimal\n num1 = float(equation.split(\" \")[1])\n\n # If the lowercase version of the operation equals 'log'\n if equation.split(\" \")[0].lower() == \"log\":\n # Return the answer\n return math.log(num1)\n\n # If the lowercase version of the operation equals 'acos'\n elif equation.split(\" \")[0].lower() == \"acos\":\n # Return the answer\n return math.acos(num1)\n\n # If the lowercase version of the operation equals 'asin'\n elif equation.split(\" \")[0].lower() == \"asin\":\n # Return the answer\n return math.asin(num1)\n\n # If the lowercase version of the operation equals 'atan'\n elif equation.split(\" \")[0].lower() == \"atan\":\n # Return the answer\n return math.atan(num1)\n\n # If the lowercase version of the operation equals 'cos'\n elif equation.split(\" \")[0].lower() == \"cos\":\n # Return the answer\n return math.cos(num1)\n\n # If the lowercase version of the operation equals 'hypot'\n elif equation.split(\" \")[0].lower() == \"hypot\":\n try:\n # Try to set a variable to an integer\n num2 = int(equation.split(\" \")[2])\n\n except ValueError:\n # Try to set a variable to an decimal\n num2 = float(equation.split(\" \")[2])\n\n # Return the answer\n return math.hypot(num1, num2)\n\n # If the lowercase version of the operation equals 'sin'\n elif equation.split(\" \")[0].lower() == \"sin\":\n # Return the answer\n return math.sin(num1)\n\n # If the lowercase version of the operation equals 'tan'\n elif equation.split(\" \")[0].lower() == \"tan\":\n # Return the answer\n return math.tan(num1)\n\n # Raise a warning\n raise ValueError(\"Invalid operation entered.\")",
"def calc(operand_1, operand_2):\n return operand_1 - operand_2",
"def calc(operand_1, operand_2):\n return operand_1 - operand_2",
"def _calculate_gravity(self, object_2, object_1):\n\n def _calculate_angle(x0, y0, x1, y1):\n \"\"\"Counts angle in radians between vector (x0, y0)(x1, y1) and horizontal axis (CW) in canvas\n coordinate system\n :returns 0 if x0 == y0 == x1 == y1 == 0\n [0.. +3.14] if vector points down\n (-3.14.. 0] if vector points up\n \"\"\"\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle\n\n m1, x1, y1 = self._get_object_params(object_1)\n m2, x2, y2 = self._get_object_params(object_2)\n R = ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n F = G * m1 * m2 / R ** 2\n angle = _calculate_angle(x1, y1, x2, y2)\n Fx1 = F * cos(angle)\n Fy1 = F * sin(angle)\n Fy2, Fx2 = -Fy1, -Fx1 # vectors are exactly opposite\n return Fx2, Fy2, Fx1, Fy1",
"def calc(operand_a, operand_b):\n return operand_a - operand_b",
"def calc_current(self) -> (Amp, None):\n power: ComType = complex(0)\n power_unit: str = ''\n volt: NumType = 0\n volt_unit: str = ''\n ohm: ComType = complex(0)\n ohm_unit: str = ''\n if self._volt_exists and self._power_exists:\n if hasattr(self._obj1, 'power'):\n power, power_unit = self._obj1.power, self._obj1.power_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'power'):\n power, power_unit = self._obj2.power, self._obj2.power_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n current = power / volt\n current_unit: str = f'Amps ({power_unit}/{volt_unit})'\n\n elif self._volt_exists and self._ohm_exists:\n if hasattr(self._obj1, 'ohm'):\n ohm, ohm_unit = self._obj1.ohm, self._obj1.ohm_unit\n elif hasattr(self._obj1, 'volts'):\n volt, volt_unit = self._obj1.volts, self._obj1.volt_unit\n if hasattr(self._obj2, 'ohm'):\n ohm, ohm_unit = self._obj2.ohm, self._obj2.ohm_unit\n elif hasattr(self._obj2, 'volts'):\n volt, volt_unit = self._obj2.volts, self._obj2.volt_unit\n current = volt / ohm\n current_unit: str = f'Amps ({volt_unit}/{ohm_unit})'\n\n else:\n return None\n\n return Amp(current, current_unit, self._obj1.frequency, self._obj1.freq_unit)",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def equations(self):\n k = 0\n ######################################################################\n # equations for fluid balance\n self.residual[k:k + self.num_nw_fluids * 2] = self.fluid_func()\n k += self.num_nw_fluids * 2\n\n ######################################################################\n # equations for mass flow balance\n self.residual[k:k + 2] = self.mass_flow_func()\n k += 2\n\n ######################################################################\n # equations for energy balance\n self.residual[k] = self.energy_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer\n if self.Q.is_set:\n self.residual[k] = (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) - self.Q.val)\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient\n if self.kA.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_func()\n k += 1\n\n ######################################################################\n # equations for specified heat transfer coefficient characteristic\n if self.kA_char.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.kA_char_func()\n k += 1\n\n ######################################################################\n # equations for specified upper terminal temperature difference\n if self.ttd_u.is_set:\n self.residual[k] = self.ttd_u_func()\n k += 1\n\n ######################################################################\n # equations for specified lower terminal temperature difference\n if self.ttd_l.is_set:\n self.residual[k] = self.ttd_l_func()\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at hot side\n if self.pr1.is_set:\n self.residual[k] = (\n self.pr1.val * self.inl[0].p.val_SI - self.outl[0].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified pressure ratio at cold side\n if self.pr2.is_set:\n self.residual[k] = (\n self.pr2.val * self.inl[1].p.val_SI - self.outl[1].p.val_SI)\n k += 1\n\n ######################################################################\n # equations for specified zeta at hot side\n if self.zeta1.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta1', inconn=0, outconn=0)\n k += 1\n\n ######################################################################\n # equations for specified zeta at cold side\n if self.zeta2.is_set:\n if np.absolute(self.residual[k]) > err ** 2 or self.it % 4 == 0:\n self.residual[k] = self.zeta_func(\n zeta='zeta2', inconn=1, outconn=1)\n k += 1\n\n ######################################################################\n # additional equations\n self.additional_equations(k)",
"def eval(self, Vobj):\n try:\n return Vobj.evaluated_on(self)\n except AttributeError:\n return self.A() * Vobj + self.b()",
"def calculate(self):",
"def __mul__(self,other):\n if(self.denominator*other.denominator<0):\n resultnumerator = -1*self.numerator*other.numerator\n resultdenominator = abs(self.denominator*other.denominator) \n else:\n resultnumerator = self.numerator*other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues",
"def impulse(self,v1,v2):\n dv_peri = self.v_peri - v1\n \n dv_aphe = self.v_peri - v2\n \n return dv_peri, dv_aphe",
"def calc(operand_1, operand_2):\n\n return operand_1 + operand_2",
"def inverse_linear ( a , b ) :\n \n x0 , f0 = a.x , a.fx\n x1 , f1 = b.x , b.fx\n\n if f0 == f1 or isequal ( f0 , f1 ) : return None\n\n return ( x0 * f1 - x1 * f0 ) / ( f1 - f0 )",
"def __add__(self, other) -> object:\n least_common_multiple = self.denominator_b\n # check denominators of fractions and if no define their least common multiple\n if self.denominator_b != other.denominator_b:\n least_common_multiple = self.least_common_multiple_func(other.denominator_b)\n\n common_numerator = (least_common_multiple / self.denominator_b * self.numerator_a) + \\\n (least_common_multiple / other.denominator_b * other.numerator_a)\n\n # check for common divisor\n common_divisor = self.common_divisor(int(common_numerator), least_common_multiple)\n if common_divisor is None:\n res = Fraction(common_numerator, least_common_multiple)\n else:\n common_numerator = common_numerator / common_divisor\n least_common_multiple = least_common_multiple / common_divisor\n res = Fraction(common_numerator, int(least_common_multiple))\n return res"
] | [
"0.601999",
"0.5506426",
"0.541305",
"0.541305",
"0.541305",
"0.5373895",
"0.53608656",
"0.5356382",
"0.53304803",
"0.52975565",
"0.5249021",
"0.52109116",
"0.5167401",
"0.513542",
"0.50895154",
"0.50706196",
"0.50706196",
"0.5034539",
"0.4954093",
"0.49457806",
"0.49324915",
"0.49324915",
"0.4905737",
"0.48826617",
"0.48714992",
"0.4869592",
"0.48683017",
"0.48647088",
"0.48620048",
"0.48565114"
] | 0.6352273 | 0 |
function to check if the current value is in a future time | def val_future_time(value):
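    # a valid value must be strictly later than the current time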
today = timezone.now()
if value < today:
raise ValidationError('Datetime should be a future Date and time') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def date_in_future(date) -> bool:\n is_in_the_future = time_after(date)\n return is_in_the_future",
"def valid(t):\n return float(t) > time.time()",
"def isPast(self):\n return (self._t < time())",
"def is_in_the_future(dt):\n if dt > datetime.now(pytz.utc):\n return True\n return False",
"def checkAtFinalTime():\n global final_time\n if final_time <= current_second:\n return True\n return False",
"def is_timestamp_in_future(timestamp: int) -> bool:\n timestamp_datetime = timestamp_to_datetime(timestamp)\n now = datetime_utc_now()\n\n return timestamp_datetime > now",
"def isFuture(self):\n return (self._t > time())",
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def is_past(self) -> bool:\n return self.start < timezone.now()",
"def run_now(self, local_time):\n return self.start_time <= local_time < self.stop_time",
"def waitUntil(t: Time_t) -> bool:\n now = dt.datetime.now(t.tzinfo)\n secs = (_fillDate(t) - now).total_seconds()\n run(asyncio.sleep(secs))\n return True",
"def is_upcoming(self):\n\n return timezone.now() < self.start < timezone.now() + timedelta(days=1)",
"def date_in_past(date: dt.datetime) -> bool:\n return date < dt.datetime.now()",
"def _check_date_not_in_future(self, date):\n if date is None:\n pass\n else:\n assert (\n date <= datetime.datetime.now()\n ), \"Provided date cannot be in the future\"",
"def _is_past_due(self):\n if self.due is None:\n return False\n\n if timezone.now() > self.due:\n return True",
"def check_if_ok_to_update(self):\n current_time = int(time.time())\n last_refresh = self.last_refresh\n if last_refresh is None:\n last_refresh = 0\n if current_time >= (last_refresh + self.refresh_rate):\n return True\n return False",
"def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False",
"def aired(self):\n # TODO: timezone\n airdatetime = self.airdatetime\n if airdatetime:\n return datetime.now() >= airdatetime + timedelta(minutes=self.series.runtime)\n else:\n return False",
"def val_future_end_time(value):\n today = timezone.now() + timezone.timedelta(minutes=settings.MIN_INTERVIEW_DURATION)\n if value < today:\n raise ValidationError(f'Datetime should be atleast {settings.MIN_INTERVIEW_DURATION} min after current Date and time')",
"def event_too_old_or_in_future(event, config):\n\n current_time = time.time()\n try:\n event_time = datetime.strptime(event['time'], constants.TIMEFMT).\\\n replace(tzinfo=timezone.utc).timestamp()\n except ValueError:\n event_time = datetime.strptime(event['time'], constants.ALT_TIMEFMT).\\\n replace(tzinfo=timezone.utc).timestamp()\n if config['old_event_age'] >= 0 and \\\n event_time + config['old_event_age'] < current_time:\n return True\n if config['future_event_age'] >= 0 and \\\n event_time - config['future_event_age'] > current_time:\n return True\n\n return False",
"def is_real_time(self):\n return time.time() - self.timestamp < self._DEADLINE_SEC",
"def stale(self, now: datetime | None = None) -> bool:\n return (\n self.last_seen is None\n or (now or dt_util.utcnow()) - self.last_seen > self.consider_home\n )",
"def is_soon(dt, window):\r\n soon = (utcnow() + datetime.timedelta(seconds=window))\r\n return normalize_time(dt) <= soon",
"def is_past_due(self):\r\n return (self.close_date is not None and\r\n datetime.datetime.now(UTC()) > self.close_date)",
"def is_soon(dt, window):\n soon = (utcnow() + datetime.timedelta(seconds=window))\n return normalize_time(dt) <= soon",
"def check_attack(self):\n now = time.time() * 1000\n if self.prev_time is None:\n return True\n else:\n next_time = self.prev_time + self.get_recharge\n if now >= next_time:\n return True\n else:\n return False",
"def valid(self):\n return self.expiry > timezone.now()",
"def has_expired(self, now):\n if now < self._expires:\n return False\n\n return self._enclave_wait_timer.has_expired()",
"def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)",
"def is_expired(self):\n return int(time.time()) - self.time > self.interval"
] | [
"0.742416",
"0.7342394",
"0.72829944",
"0.7265453",
"0.7176794",
"0.7147083",
"0.7041912",
"0.70314723",
"0.6834246",
"0.67728204",
"0.6656451",
"0.66316754",
"0.6564517",
"0.65577716",
"0.65541255",
"0.65340716",
"0.65168554",
"0.64972854",
"0.64915395",
"0.6470125",
"0.64528894",
"0.6428874",
"0.64198655",
"0.63937694",
"0.63751626",
"0.63734514",
"0.63627046",
"0.6317768",
"0.6316427",
"0.6312413"
] | 0.7358469 | 1 |
In words ending with 'y', this function replaces 'y' by 'i'. step1c turns terminal y to i when there is another vowel in the stem. | def step1c(self, word):
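        # Porter step 1c: replace a terminal 'y' with 'i' only when the stem before it contains a vowel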
if word.endswith('y'):
result = word.rfind('y')
base = word[:result]
if self.containsVowel(base):
word = base
word += 'i'
return word | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step1c(self):\n\t\tif (self.ends(\"y\") and self.vowelinstem()):\n\t\t\tself.b = self.b[:self.k] + 'i' + self.b[self.k+1:]",
"def _step1c(self, word):\n\n def nltk_condition(stem):\n \"\"\"\n This has been modified from the original Porter algorithm so\n that y->i is only done when y is preceded by a consonant,\n but not if the stem is only a single consonant, i.e.\n\n (*c and not c) Y -> I\n\n So 'happy' -> 'happi', but\n 'enjoy' -> 'enjoy' etc\n\n This is a much better rule. Formerly 'enjoy'->'enjoi' and\n 'enjoyment'->'enjoy'. Step 1c is perhaps done too soon; but\n with this modification that no longer really matters.\n\n Also, the removal of the contains_vowel(z) condition means\n that 'spy', 'fly', 'try' ... stem to 'spi', 'fli', 'tri' and\n conflate with 'spied', 'tried', 'flies' ...\n \"\"\"\n return len(stem) > 1 and self._is_consonant(stem, len(stem) - 1)\n\n def original_condition(stem):\n return self._contains_vowel(stem)\n\n return self._apply_rule_list(\n word,\n [\n (\n \"y\",\n \"i\",\n nltk_condition\n if self.mode == self.NLTK_EXTENSIONS\n else original_condition,\n )\n ],\n )",
"def _step1b(self, word):\n # this NLTK-only block extends the original algorithm, so that\n # 'spied'->'spi' but 'died'->'die' etc\n if self.mode == self.NLTK_EXTENSIONS:\n if word.endswith(\"ied\"):\n if len(word) == 4:\n return self._replace_suffix(word, \"ied\", \"ie\")\n else:\n return self._replace_suffix(word, \"ied\", \"i\")\n\n # (m>0) EED -> EE\n if word.endswith(\"eed\"):\n stem = self._replace_suffix(word, \"eed\", \"\")\n if self._measure(stem) > 0:\n return stem + \"ee\"\n else:\n return word\n\n rule_2_or_3_succeeded = False\n\n for suffix in [\"ed\", \"ing\"]:\n if word.endswith(suffix):\n intermediate_stem = self._replace_suffix(word, suffix, \"\")\n if self._contains_vowel(intermediate_stem):\n rule_2_or_3_succeeded = True\n break\n\n if not rule_2_or_3_succeeded:\n return word\n\n return self._apply_rule_list(\n intermediate_stem,\n [\n (\"at\", \"ate\", None), # AT -> ATE\n (\"bl\", \"ble\", None), # BL -> BLE\n (\"iz\", \"ize\", None), # IZ -> IZE\n # (*d and not (*L or *S or *Z))\n # -> single letter\n (\n \"*d\",\n intermediate_stem[-1],\n lambda stem: intermediate_stem[-1] not in (\"l\", \"s\", \"z\"),\n ),\n # (m=1 and *o) -> E\n (\n \"\",\n \"e\",\n lambda stem: (self._measure(stem) == 1 and self._ends_cvc(stem)),\n ),\n ],\n )",
"def stem(self, word):\n word = word.lower()\n\n if word in self.__special_words:\n return self.__special_words[word]\n\n # Map the different apostrophe characters to a single consistent one\n word = (word.replace(u(\"\\u2019\"), u(\"\\x27\"))\n .replace(u(\"\\u2018\"), u(\"\\x27\"))\n .replace(u(\"\\u201B\"), u(\"\\x27\")))\n\n if word.startswith(u(\"\\x27\")):\n word = word[1:]\n\n if word.startswith(\"y\"):\n word = \"\".join((\"Y\", word[1:]))\n\n for i in range(1, len(word)):\n if word[i - 1] in self.__vowels and word[i] == \"y\":\n word = \"\".join((word[:i], \"Y\", word[i + 1:]))\n\n step1a_vowel_found = False\n step1b_vowel_found = False\n\n r1 = \"\"\n r2 = \"\"\n\n if word.startswith((\"gener\", \"commun\", \"arsen\")):\n if word.startswith((\"gener\", \"arsen\")):\n r1 = word[5:]\n else:\n r1 = word[6:]\n\n for i in range(1, len(r1)):\n if r1[i] not in self.__vowels and r1[i - 1] in self.__vowels:\n r2 = r1[i + 1:]\n break\n else:\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n\n # STEP 0\n for suffix in self.__step0_suffixes:\n if word.endswith(suffix):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 1a\n for suffix in self.__step1a_suffixes:\n if word.endswith(suffix):\n\n if suffix == \"sses\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"ied\", \"ies\"):\n if len(word[:-len(suffix)]) > 1:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n else:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix == \"s\":\n for letter in word[:-2]:\n if letter in self.__vowels:\n step1a_vowel_found = True\n break\n\n if step1a_vowel_found:\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n break\n\n # STEP 1b\n for suffix in self.__step1b_suffixes:\n if word.endswith(suffix):\n if suffix in (\"eed\", \"eedly\"):\n\n if r1.endswith(suffix):\n word = \"\".join((word[:-len(suffix)], \"ee\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ee\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ee\"))\n else:\n r2 = \"\"\n else:\n for letter in word[:-len(suffix)]:\n if letter in self.__vowels:\n step1b_vowel_found = True\n break\n\n if step1b_vowel_found:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n if word.endswith((\"at\", \"bl\", \"iz\")):\n word = \"\".join((word, \"e\"))\n r1 = \"\".join((r1, \"e\"))\n\n if len(word) > 5 or len(r1) >= 3:\n r2 = \"\".join((r2, \"e\"))\n\n elif word.endswith(self.__double_consonants):\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif ((r1 == \"\" and len(word) >= 3 and\n word[-1] not in self.__vowels and\n word[-1] not in \"wxY\" and\n word[-2] in self.__vowels and\n word[-3] not in self.__vowels)\n or\n (r1 == \"\" and len(word) == 2 and\n word[0] in self.__vowels and\n word[1] not in self.__vowels)):\n\n word = \"\".join((word, \"e\"))\n\n if len(r1) > 0:\n r1 = \"\".join((r1, \"e\"))\n\n if len(r2) > 0:\n r2 = \"\".join((r2, \"e\"))\n break\n\n # STEP 1c\n if (len(word) > 2\n and word[-1] in \"yY\"\n and word[-2] not in self.__vowels):\n word = \"\".join((word[:-1], \"i\"))\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"i\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"i\"))\n else:\n r2 = \"\"\n\n # STEP 2\n for suffix in self.__step2_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"enci\", \"anci\", \"abli\"):\n 
word = \"\".join((word[:-1], \"e\"))\n\n if len(r1) >= 1:\n r1 = \"\".join((r1[:-1], \"e\"))\n else:\n r1 = \"\"\n\n if len(r2) >= 1:\n r2 = \"\".join((r2[:-1], \"e\"))\n else:\n r2 = \"\"\n\n elif suffix == \"entli\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix in (\"izer\", \"ization\"):\n word = \"\".join((word[:-len(suffix)], \"ize\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ize\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ize\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ational\", \"ation\", \"ator\"):\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"alism\", \"aliti\", \"alli\"):\n word = \"\".join((word[:-len(suffix)], \"al\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"al\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"al\"))\n else:\n r2 = \"\"\n\n elif suffix == \"fulness\":\n word = word[:-4]\n r1 = r1[:-4]\n r2 = r2[:-4]\n\n elif suffix in (\"ousli\", \"ousness\"):\n word = \"\".join((word[:-len(suffix)], \"ous\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ous\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ous\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"iveness\", \"iviti\"):\n word = \"\".join((word[:-len(suffix)], \"ive\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ive\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ive\"))\n else:\n r2 = \"e\"\n\n elif suffix in (\"biliti\", \"bli\"):\n word = \"\".join((word[:-len(suffix)], \"ble\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ble\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ble\"))\n else:\n r2 = \"\"\n\n elif suffix == \"ogi\" and word[-4] == \"l\":\n word = word[:-1]\n r1 = r1[:-1]\n r2 = r2[:-1]\n\n elif suffix in (\"fulli\", \"lessli\"):\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"li\" and word[-3] in self.__li_ending:\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n break\n\n # STEP 3\n for suffix in self.__step3_suffixes:\n if word.endswith(suffix):\n if r1.endswith(suffix):\n if suffix == \"tional\":\n word = word[:-2]\n r1 = r1[:-2]\n r2 = r2[:-2]\n\n elif suffix == \"ational\":\n word = \"\".join((word[:-len(suffix)], \"ate\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ate\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ate\"))\n else:\n r2 = \"\"\n\n elif suffix == \"alize\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n\n elif suffix in (\"icate\", \"iciti\", \"ical\"):\n word = \"\".join((word[:-len(suffix)], \"ic\"))\n\n if len(r1) >= len(suffix):\n r1 = \"\".join((r1[:-len(suffix)], \"ic\"))\n else:\n r1 = \"\"\n\n if len(r2) >= len(suffix):\n r2 = \"\".join((r2[:-len(suffix)], \"ic\"))\n else:\n r2 = \"\"\n\n elif suffix in (\"ful\", \"ness\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n\n elif suffix == \"ative\" and r2.endswith(suffix):\n word = word[:-5]\n r1 = r1[:-5]\n r2 = r2[:-5]\n break\n\n # STEP 4\n for suffix in self.__step4_suffixes:\n if 
word.endswith(suffix):\n if r2.endswith(suffix):\n if suffix == \"ion\":\n if word[-4] in \"st\":\n word = word[:-3]\n r1 = r1[:-3]\n r2 = r2[:-3]\n else:\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n break\n\n # STEP 5\n if r2.endswith(\"l\") and word[-2] == \"l\":\n word = word[:-1]\n elif r2.endswith(\"e\"):\n word = word[:-1]\n elif r1.endswith(\"e\"):\n if len(word) >= 4 and (word[-2] in self.__vowels or\n word[-2] in \"wxY\" or\n word[-3] not in self.__vowels or\n word[-4] in self.__vowels):\n word = word[:-1]\n\n word = word.replace(\"Y\", \"y\")\n return word",
"def step2(self):\n\t\tif self.b[self.k - 1] == 'a':\n\t\t\tif self.ends(\"ational\"): self.r(\"ate\")\n\t\t\telif self.ends(\"tional\"): self.r(\"tion\")\n\t\telif self.b[self.k - 1] == 'c':\n\t\t\tif self.ends(\"enci\"):\t self.r(\"ence\")\n\t\t\telif self.ends(\"anci\"): self.r(\"ance\")\n\t\telif self.b[self.k - 1] == 'e':\n\t\t\tif self.ends(\"izer\"):\t self.r(\"ize\")\n\t\telif self.b[self.k - 1] == 'l':\n\t\t\tif self.ends(\"bli\"):\t self.r(\"ble\") # --DEPARTURE--\n\t\t\t# To match the published algorithm, replace this phrase with\n\t\t\t#\tif self.ends(\"abli\"):\t self.r(\"able\")\n\t\t\telif self.ends(\"alli\"): self.r(\"al\")\n\t\t\telif self.ends(\"entli\"): self.r(\"ent\")\n\t\t\telif self.ends(\"eli\"):\t self.r(\"e\")\n\t\t\telif self.ends(\"ousli\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 'o':\n\t\t\tif self.ends(\"ization\"): self.r(\"ize\")\n\t\t\telif self.ends(\"ation\"): self.r(\"ate\")\n\t\t\telif self.ends(\"ator\"): self.r(\"ate\")\n\t\telif self.b[self.k - 1] == 's':\n\t\t\tif self.ends(\"alism\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iveness\"): self.r(\"ive\")\n\t\t\telif self.ends(\"fulness\"): self.r(\"ful\")\n\t\t\telif self.ends(\"ousness\"): self.r(\"ous\")\n\t\telif self.b[self.k - 1] == 't':\n\t\t\tif self.ends(\"aliti\"):\t self.r(\"al\")\n\t\t\telif self.ends(\"iviti\"): self.r(\"ive\")\n\t\t\telif self.ends(\"biliti\"): self.r(\"ble\")\n\t\telif self.b[self.k - 1] == 'g': # --DEPARTURE--\n\t\t\tif self.ends(\"logi\"):\t self.r(\"log\")\n\t\t# To match the published algorithm, delete this phrase",
"def step1ab(self):\n\t\tif self.b[self.k] == 's':\n\t\t\tif self.ends(\"sses\"):\n\t\t\t\tself.k = self.k - 2\n\t\t\telif self.ends(\"ies\"):\n\t\t\t\tself.setto(\"i\")\n\t\t\telif self.b[self.k - 1] != 's':\n\t\t\t\tself.k = self.k - 1\n\t\tif self.ends(\"eed\"):\n\t\t\tif self.m() > 0:\n\t\t\t\tself.k = self.k - 1\n\t\telif (self.ends(\"ed\") or self.ends(\"ing\")) and self.vowelinstem():\n\t\t\tself.k = self.j\n\t\t\tif self.ends(\"at\"): self.setto(\"ate\")\n\t\t\telif self.ends(\"bl\"): self.setto(\"ble\")\n\t\t\telif self.ends(\"iz\"): self.setto(\"ize\")\n\t\t\telif self.doublec(self.k):\n\t\t\t\tself.k = self.k - 1\n\t\t\t\tch = self.b[self.k]\n\t\t\t\tif ch == 'l' or ch == 's' or ch == 'z':\n\t\t\t\t\tself.k = self.k + 1\n\t\t\telif (self.m() == 1 and self.cvc(self.k)):\n\t\t\t\tself.setto(\"e\")",
"def translate(self):\n\t\tvowels = \"aeiou\"\n\n\t\tif (self.word[0] not in vowels) and (self.word[1] in vowels):\n\t\t\tnew_word = self.word[1:] + self.word[0] + \"ay\"\n\t\telif self.word[0] in vowels:\n\t\t\tnew_word = self.word + \"way\"\n\t\telse:\n\t\t\tnew_word = self.word[2:] + self.word[:2] + \"ay\"\n\n\t\tprint(new_word)",
"def _step1a(self, word):\n # this NLTK-only rule extends the original algorithm, so\n # that 'flies'->'fli' but 'dies'->'die' etc\n if self.mode == self.NLTK_EXTENSIONS:\n if word.endswith(\"ies\") and len(word) == 4:\n return self._replace_suffix(word, \"ies\", \"ie\")\n\n return self._apply_rule_list(\n word,\n [\n (\"sses\", \"ss\", None), # SSES -> SS\n (\"ies\", \"i\", None), # IES -> I\n (\"ss\", \"ss\", None), # SS -> SS\n (\"s\", \"\", None), # S ->\n ],\n )",
"def transliterate(self):\n# print('call transliterator, input = '+str(self.syllables))\n word = ''\n if len(self.syllables) == 0:\n word = 'U'\n return word\n else:\n for s in self.syllables:\n i, f = s[0], s[-1]\n# print(i, f)\n try:\n variants = self.table[i][f].split(' ')\n except:\n word = 'N'\n return word\n if len(variants) > 1:\n if self.sex == 'M':\n word += variants[0]\n else:\n word += variants[1]\n else:\n word += variants[0]\n word = self.beginning_checker(word)\n word = self.beginning_liquid_checker(word)\n word = re.sub(r'··', r'·', word)\n return word",
"def stem(self, word):\n word = word.lower()\n\n step1_success = False\n\n # All acute accents are replaced by grave accents.\n word = (word.replace(u(\"\\xE1\"), u(\"\\xE0\"))\n .replace(u(\"\\xE9\"), u(\"\\xE8\"))\n .replace(u(\"\\xED\"), u(\"\\xEC\"))\n .replace(u(\"\\xF3\"), u(\"\\xF2\"))\n .replace(u(\"\\xFA\"), u(\"\\xF9\")))\n\n # Every occurrence of 'u' after 'q'\n # is put into upper case.\n for i in range(1, len(word)):\n if word[i - 1] == \"q\" and word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n\n # Every occurrence of 'u' and 'i'\n # between vowels is put into upper case.\n for i in range(1, len(word) - 1):\n if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:\n if word[i] == \"u\":\n word = \"\".join((word[:i], \"U\", word[i + 1:]))\n elif word[i] == \"i\":\n word = \"\".join((word[:i], \"I\", word[i + 1:]))\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if rv.endswith(suffix):\n if rv[-len(suffix) - 4:-len(suffix)] in (\"ando\", \"endo\"):\n word = word[:-len(suffix)]\n r1 = r1[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n elif (rv[-len(suffix) - 2:-len(suffix)] in\n (\"ar\", \"er\", \"ir\")):\n word = \"\".join((word[:-len(suffix)], \"e\"))\n r1 = \"\".join((r1[:-len(suffix)], \"e\"))\n r2 = \"\".join((r2[:-len(suffix)], \"e\"))\n rv = \"\".join((rv[:-len(suffix)], \"e\"))\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if word.endswith(suffix):\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2 .endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif (suffix in (\"amento\", \"amenti\",\n \"imento\", \"imenti\") and\n rv.endswith(suffix)):\n step1_success = True\n word = word[:-6]\n rv = rv[:-6]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\"azione\", \"azioni\", \"atore\", \"atori\"):\n word = word[:-len(suffix)]\n r2 = r2[:-len(suffix)]\n rv = rv[:-len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logie\"):\n word = word[:-2]\n rv = word[:-2]\n\n elif suffix in (\"uzione\", \"uzioni\",\n \"usione\", \"usioni\"):\n word = word[:-5]\n rv = rv[:-5]\n\n elif suffix in (\"enza\", \"enze\"):\n word = \"\".join((word[:-2], \"te\"))\n rv = \"\".join((rv[:-2], \"te\"))\n\n elif suffix == u(\"it\\xE0\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith((\"ic\", \"iv\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(\"abil\"):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"ivo\", \"ivi\", \"iva\", \"ive\"):\n word = word[:-3]\n r2 = r2[:-3]\n rv = rv[:-3]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 2: Verb suffixes\n if not step1_success:\n for suffix in self.__step2_suffixes:\n if rv.endswith(suffix):\n word = word[:-len(suffix)]\n rv = rv[:-len(suffix)]\n break\n\n # STEP 3a\n if rv.endswith((\"a\", \"e\", \"i\", \"o\", u(\"\\xE0\"), u(\"\\xE8\"),\n u(\"\\xEC\"), 
u(\"\\xF2\"))):\n word = word[:-1]\n rv = rv[:-1]\n\n if rv.endswith(\"i\"):\n word = word[:-1]\n rv = rv[:-1]\n\n # STEP 3b\n if rv.endswith((\"ch\", \"gh\")):\n word = word[:-1]\n\n word = word.replace(\"I\", \"i\").replace(\"U\", \"u\")\n return word",
"def stem(self, word):\n word = word.lower()\n\n if word in self.stopwords:\n return word\n\n step1_success = False\n\n r1, r2 = self._r1r2_standard(word, self.__vowels)\n rv = self._rv_standard(word, self.__vowels)\n\n # STEP 0: Attached pronoun\n for suffix in self.__step0_suffixes:\n if not (word.endswith(suffix) and rv.endswith(suffix)):\n continue\n\n if (\n rv[: -len(suffix)].endswith(\n (\n \"ando\",\n \"ar\",\n \"er\",\n \"iendo\",\n \"ir\",\n )\n )\n ) or (\n rv[: -len(suffix)].endswith(\"yendo\")\n and word[: -len(suffix)].endswith(\"uyendo\")\n ):\n\n word = self.__replace_accented(word[: -len(suffix)])\n r1 = self.__replace_accented(r1[: -len(suffix)])\n r2 = self.__replace_accented(r2[: -len(suffix)])\n rv = self.__replace_accented(rv[: -len(suffix)])\n break\n\n # STEP 1: Standard suffix removal\n for suffix in self.__step1_suffixes:\n if not word.endswith(suffix):\n continue\n\n if suffix == \"amente\" and r1.endswith(suffix):\n step1_success = True\n word = word[:-6]\n r2 = r2[:-6]\n rv = rv[:-6]\n\n if r2.endswith(\"iv\"):\n word = word[:-2]\n r2 = r2[:-2]\n rv = rv[:-2]\n\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith((\"os\", \"ic\", \"ad\")):\n word = word[:-2]\n rv = rv[:-2]\n\n elif r2.endswith(suffix):\n step1_success = True\n if suffix in (\n \"adora\",\n \"ador\",\n \"acion\",\n \"adoras\",\n \"adores\",\n \"aciones\",\n \"ante\",\n \"antes\",\n \"ancia\",\n \"ancias\",\n ):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith(\"ic\"):\n word = word[:-2]\n rv = rv[:-2]\n\n elif suffix in (\"logia\", \"logias\"):\n word = suffix_replace(word, suffix, \"log\")\n rv = suffix_replace(rv, suffix, \"log\")\n\n elif suffix in (\"ucion\", \"uciones\"):\n word = suffix_replace(word, suffix, \"u\")\n rv = suffix_replace(rv, suffix, \"u\")\n\n elif suffix in (\"encia\", \"encias\"):\n word = suffix_replace(word, suffix, \"ente\")\n rv = suffix_replace(rv, suffix, \"ente\")\n\n elif suffix == \"mente\":\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n if r2.endswith((\"ante\", \"able\", \"ible\")):\n word = word[:-4]\n rv = rv[:-4]\n\n elif suffix in (\"idad\", \"idades\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n\n for pre_suff in (\"abil\", \"ic\", \"iv\"):\n if r2.endswith(pre_suff):\n word = word[: -len(pre_suff)]\n rv = rv[: -len(pre_suff)]\n\n elif suffix in (\"ivo\", \"iva\", \"ivos\", \"ivas\"):\n word = word[: -len(suffix)]\n r2 = r2[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if r2.endswith(\"at\"):\n word = word[:-2]\n rv = rv[:-2]\n else:\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2a: Verb suffixes beginning 'y'\n if not step1_success:\n for suffix in self.__step2a_suffixes:\n if rv.endswith(suffix) and word[-len(suffix) - 1 : -len(suffix)] == \"u\":\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n break\n\n # STEP 2b: Other verb suffixes\n for suffix in self.__step2b_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n rv = rv[: -len(suffix)]\n if suffix in (\"en\", \"es\", \"eis\", \"emos\"):\n if word.endswith(\"gu\"):\n word = word[:-1]\n\n if rv.endswith(\"gu\"):\n rv = rv[:-1]\n break\n\n # STEP 3: Residual suffix\n for suffix in self.__step3_suffixes:\n if rv.endswith(suffix):\n word = word[: -len(suffix)]\n if suffix in (\"e\", \"\\xE9\"):\n rv = rv[: -len(suffix)]\n\n if word[-2:] == \"gu\" and rv.endswith(\"u\"):\n word = word[:-1]\n 
break\n\n word = self.__replace_accented(word)\n\n return word",
"def stem(s):\n special = {'appall', 'kill', 'stroll', 'kiss', 'thrill', 'chugg', 'dress', 'err', 'express', 'fall', 'free', 'gall', 'add','cross', 'impress', 'inn', 'call', 'ball', 'bill', 'buzz'} \n ie_words = {'vying', 'lying', 'dying', 'tying'}\n short_ing = {'bring','sling','sping', 'bring', 'sing', 'ring', 'king', 'cling' ,'fling', 'wing', 'ding', 'ping', 'ting'}\n c_k_words = {'kick', 'muck', 'lock','pick', 'back', 'mock', 'peck', 'lock', 'nick'}\n\n if len(s) <= 3:\n return s\n if s[-3:] == 'ing' or s[-4:] == 'ings': \n if s in short_ing:\n return s\n elif s in special:\n return s[:-3]\n elif s[:-3] not in special and s[-4] == s[-5]:\n return s[:-4]\n elif s[:-3] not in c_k_words and s[-4] == 'k':\n return s[:-4]\n elif s == 'everything' or s == 'anything' or s == 'something':\n return s[:-5]\n elif s in ie_words:\n return s[0] + 'ie'\n else:\n return s[:-3]\n elif s[-3:] == 'ers':\n return s[:-3]\n elif s[-2:] == 'es':\n return s[:-2]\n elif s[-2:] == 'en':\n return s[:-2]\n elif s[-2:] == 'er':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2] \n elif s[-2:] == 'ed':\n if s[-3] == s[-4]:\n return s[:-3]\n else:\n return s[:-2]\n elif s[-3:] == 'ies':\n return s[:-2]\n elif s[-1:] == 's':\n return s[:-1]\n elif s[-1:] == 'e' and s not in ie_words:\n return s[:-1]\n elif s[-3:] == 'ful':\n return s[:-3]\n elif s[:2] == 'de':\n return s[2:]\n elif len(s) > 4 and s[-4:] == 'able' or s[-4] == 'ible':\n return s[:-4]\n elif s[:2] == 'in' or s[:2] == 'il' or s[:2] == 'ir':\n return s[2:]\n elif s[-1:] == 'y':\n return s[:-1] + 'i'\n else:\n return s",
"def pig_word(self, original):\n word = original.lower()\n if word[0] in \"aeiou\":\n new_word = word + 'ay'\n else:\n new_word = word[1:] + word[0] + 'ay'\n return new_word",
"def main():\n while True:\n #get input string\n input_string = input(\"Enter a word to translate: \")\n #check first letter\n if input_string[0].lower() in CONSONANTS:\n #if consonant remove first letter and add -ay\n pig_latin = input_string[1:]\n pig_latin = pig_latin + input_string[0] + \"ay\"\n elif input_string[0].lower() in VOWELS:\n #else add -way\n pig_latin = input_string + \"way\"\n #print word\n print(f\"\\n\\n{pig_latin}\\n\\n\")\n #ask to stop\n stop = input(\"Translate another? (Enter else 'n' to quit) : \")\n #if 'n' break\n if stop.lower() == 'n':\n break\n input(\"Press enter to quit: \")",
"def gibber(self): \n for x in self.consonants:\n if (x in self.sentence):\n \t self.sentence = self.sentence.replace(x, x+'o'+unicode(x).lower())",
"def pig_latinify(word):\n\n first_letter = word[0]\n\n if first_letter in VOWELS:\n output_word = word + \"yay\"\n else:\n #scan for vowel if word starts with a consonant\n for i in range(len(word)):\n individual_letter = word[i]\n if individual_letter in VOWELS:\n output_word = word[i:] + word[:i] + \"ay\"\n break\n else:\n continue\n\n return output_word",
"def rule_uppercase_i(words):\n for i in range(0, len(words)):\n if words[i].text == 'i':\n words[i].text = 'I'\n return words",
"def pig_latin(phrase):\n\n\n # loop over each word in the phrase\n # in word[0] starts with aeiou\n # add yay to the end of that word\n # if word[0] starts with non aeiou\n # move word[0] to the end and add ay\n\n result = []\n\n for word in phrase.split():\n\n if word[0] in 'aeiou':\n\n result.append(word + 'yay')\n\n else:\n\n result.append(word[1:] + word[0] + 'ay')\n\n return \" \".join(result)",
"def fry(word):\n\n # looks for a Y or y which will be (captured) followed and ended by an 'ou'\n match_you = re.match('([Yy])ou$', word)\n\n # First group will be the (captured) group so either 'Y' or 'y'\n if match_you:\n return match_you.group(1) + \"'all\"\n\n # looks for anyword ending in 'ing'\n match_ing = re.search('(.+)ing$', word)\n\n # checks if vowel exists before the 'ing'\n if match_ing:\n vowel_check = re.search('[aeiouy]', match_ing.group(1))\n # First group will be the (captured) group so everything before the 'ing'\n if vowel_check:\n return match_ing.group(1) + \"in'\"\n\n return word",
"def translate_leet(phrase):",
"def verb_lemma(word):\n if word.endswith(\"ed\"):\n if word[:-2].endswith(\"v\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"at\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"it\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"et\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ut\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ac\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"i\"):\n return word[:-3].lower() + \"y\"\n elif word[:-2].endswith(\"ir\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"ag\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nc\"):\n return word[:-2].lower() + \"e\"\n elif word[:-2].endswith(\"nu\"):\n return word[:-2].lower() + \"e\"\n else:\n return word[:-2].lower() \n elif word.endswith(\"ing\"):\n if word[:-3].endswith(\"v\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"at\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"it\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"et\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ut\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ac\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"i\"):\n return word[:-4].lower() + \"y\"\n elif word[:-3].endswith(\"ir\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"ag\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nc\"):\n return word[:-3].lower() + \"e\"\n elif word[:-3].endswith(\"nu\"):\n return word[:-3].lower() + \"e\"\n else:\n return word[:-3].lower()\n elif re.match(r\"(does|did|done)\", word):\n return (\"do\")\n elif re.match(r\"(is|are|am|was|will|were|been)\", word):\n return (\"be\")\n elif word == (\"'s\"):\n return (\"be\")\n elif re.match(r\"(had|has|'ve)\", word):\n return (\"have\")\n else:\n return word.lower()",
"def stem(s):\n if len(s)>4:\n if s[-4:] in 'able ency ship':\n s = s[:-4]\n elif s[-3:] in 'ing ily ies':\n \n if s[-3:] == 'ing':\n s = s[:-3]\n else:\n s = s[:-3] + 'y'\n \n \n elif s[-2:] in 'ed er es ly':\n s = s[:-2]\n if s[-1]=='i':\n s=s[:-1] + 'y'\n\n\n elif s[-1] in 's':\n s = s[:-1]\n\n if s[-1] == 's' or s[-2:] == 'ed' or s[-2:] == 'er' or s[-2:] == 'es' \\\n or s[-2:] == 'ly' or s[-3:]=='ing' or s[-3:]=='ily' or s[-3:]=='ies'\\\n or s[-4:] == 'able' or s[-4:] == 'ency' or s[-4:] == 'ship':\n stem_rest= stem(s)\n else:\n stem_rest=s\n\n return stem_rest\n else:\n return s",
"def main():\n print(\"Translate English into Pig Latin.\\n\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\n while True:\n #get input string\n input_string = input(\"Enter a word to translate: \").lower()\n #split string into words array\n words = []\n start = 0\n end = 0\n while end != -1:\n end = input_string.find(' ', start)\n words.append(input_string[start:end])\n start = end + 1\n\n pig_latin = []\n for word in words:\n #check first letter\n if word[0] in CONSONANTS:\n #if consonant remove first letter and add -ay\n pig_latin_word = word[1:]\n pig_latin_word = pig_latin_word + word[0] + \"ay\"\n elif word[0] in VOWELS:\n #else add -way\n pig_latin_word = word + \"way\"\n pig_latin.append(pig_latin_word)\n\n print(\"\\n\\n\")\n for word in pig_latin:\n #print word\n print(word + \" \", end = '')\n print(\"\\n\\n\")\n\n #ask to stop\n stop = input(\"Translate another? (Enter else 'n' to quit) : \")\n #if 'n' break\n if stop.lower() == 'n':\n break\n input(\"Press enter to quit: \")",
"def pigLatinTranslator(word):\n\n vowels = \"aeiouAEIOU\"\n word = str(word)\n if not word.isalpha():\n return \"Please submit a single word.\"\n elif len(word) < 2:\n return \"Please submit a longer word.\"\n else:\n if word[0] in vowels:\n return word + \"yay\"\n for letter in word:\n word = word[1:] + word[0]\n if word[0] in vowels:\n return word + \"ay\"\n return word[1:] + word[0] + \"ay\"",
"def cleaning(string, EOS=False):\n\n # before cleaning up, first identify end of the sentences (EOS)\n if EOS:\n pLu = '[{}]'.format(\"\".join([chr(i) for i in range(sys.maxunicode) if chr(i).isupper()]))\n EOS = re.compile(r'([a-z]+|[ş|ı])(\\. )((' + pLu + '[a-z]?)|([0-9]+))')\n string = EOS.sub(r'\\1#\\3', string)\n\n # period at the end of the sentences are being replaced with hastag (#)\n string = string.lower()\n mapping = {}\n mapping['99_807'] = 231\n mapping['105_770'] = 105\n mapping['117_770'] = 117\n mapping['105_775'] = 105\n mapping['117_776'] = 252\n mapping['115_807'] = 351\n mapping['103_774'] = 287\n mapping['97_770'] = 97\n mapping['111_776'] = 246\n mapping['97_785'] = 97\n Alist = {97, 99, 103, 105, 111, 115, 117}\n solv_prob = []\n flag = False\n for i, c in enumerate(string):\n if flag:\n flag = False\n continue # pass this character\n if not ord(c) in Alist:\n solv_prob.append(c) # no need to check this character\n else:\n if i == len(string) - 1:\n continue\n cn = string[i + 1] # next character\n key = '{}_{}'.format(ord(c), ord(cn)) # creating string with their ordinal\n if key in mapping.keys(): # cheking if this is to be mapped\n solv_prob.append(chr(mapping[key])) # append the mapped character to the list\n flag = True # raising flag to pass next character\n continue\n else:\n solv_prob.append(c)\n\n data = ''.join(solv_prob)\n data = data.replace('iğdır', 'ığdır')\n data = data.replace('irak', 'ırak')\n # Data= [d if len(d) > 0 else '#' for d in data.splitlines()] # removing empty lines\n return data",
"def profanity_word_handler(word):\n return word[0] + ''.join([settings.CENSOR_PROFANITY_REPLACEMENT_CHARACTER for I in range(len(word)-2)]) + word [-1]",
"def celex_diphthong_sub(word):\n word = re.sub(\"2\", \"#I\", word)\n word = re.sub(\"4\", \"QI\", word)\n word = re.sub(\"6\", \"#U\", word)\n word = re.sub(\"7\", \"I@\", word)\n word = re.sub(\"8\", \"E@\", word)\n word = re.sub(\"9\", \"U@\", word)\n return word",
"def makePigLatin(word): \n m = len(word)\n vowels = \"a\", \"e\", \"i\", \"o\", \"u\", \"y\" \n # short words are not converted \n if m<3 or word==\"the\":\n return word\n else:\n for i in vowels:\n if word.find(i) < m and word.find(i) != -1:\n m = word.find(i)\n if m==0:\n return word+\"way\" \n else:\n return word[m:]+word[:m]+\"ay\"",
"def convert_consonantal_i(self, word) -> str:\n match = list(self.consonantal_i_matcher.finditer(word))\n if match:\n if word[0].isupper():\n return \"J\" + word[1:]\n return \"j\" + word[1:]\n return word",
"def verb_stem(s):\n \n #If the stem is have, its 3s form is has.\n if s == \"has\" :\n return \"have\"\n\n #If the stem ends in y preceded by a vowel, simply add s (pays, buys).\n elif re.match(r\"[A-z]+[aeiou][y]s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in y preceded by a non-vowel and contains at least three letters, change the y to ies (flies, tries, unifies).\n elif re.match(r\"[A-z]+[^aeiou]ies\\b\", s):\n str = s[:-3] + 'y'\n\n #If the stem is of the form Xie where X is a single letter other than a vowel, simply add s (dies, lies, ties note that this doesnt account for unties).\n elif re.match(r\"[^aeiou]ies\\b\", s):\n str = s[:-1]\n\n #If the stem ends in o,x,ch,sh,ss or zz, add es (goes, boxes, attaches, washes, dresses, fizzes).\n elif re.match(r\"[A-z]+([ox]|[cs]h|[s]s|[z]z)es\\b\", s): \n str = s[:-2]\n\n #If the stem ends in se or ze but not in sse or zze, add s (loses, dazes, lapses, analyses).\n elif re.match(r\"[A-z]+([s][^s][e]|[z][^z][e])s\\b\", s):\n str = s[:-1]\n\n #If the stem ends in e not preceded by i,o,s,x,z,ch,sh, just add s (likes, hates, bathes).\n elif re.match(r\"[A-z]+([^iosxz]|[^ch]|[^sh])es\\b\", s):\n str = s[:-1]\n \n #If the stem ends in anything except s,x,y,z,ch,sh or a vowel, add s (eats, tells, shows)\n elif re.match(r\"[A-z]+([^sxyzaeiou]|[^cs]h)s\\b\", s):\n str = s[:-1]\n\n else: \n str = \"\"\n\n\n matches = [(w, t) for (w, t) in vb_list if (w == s or w == str)]\n\n tag_s = [(w, t) for (w, t) in matches if w == s and t == 'VBZ']\n\n if tag_s == True:\n return str\n else:\n tag_str = [t for (w, t) in matches if w == str and t == 'VB']\n\n if not (tag_s or tag_str):\n str = \"\"\n\n return str"
] | [
"0.7266689",
"0.66175455",
"0.63900864",
"0.61191475",
"0.6068698",
"0.60429436",
"0.5993422",
"0.5989975",
"0.5857056",
"0.5811782",
"0.5710952",
"0.57005095",
"0.56973",
"0.56643087",
"0.5558673",
"0.5556823",
"0.55546314",
"0.5542172",
"0.54957694",
"0.54321015",
"0.54018617",
"0.53842527",
"0.534732",
"0.5329865",
"0.53286153",
"0.5297672",
"0.52960604",
"0.5293738",
"0.5273995",
"0.5245557"
] | 0.80976295 | 0 |
equ_type corresponds to the WavelengthSolutionFunctions; coeffs corresponds to the function (for a polynomial, coeffs = c gives y = c[0] + c[1]*x + c[2]*x**2 + c[3]*x**3 + ...); extra is extra info from the extraction process | def __init__ (self, equ_type='none' , extra='none'):
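        # normalize the requested equation type, start with an empty coefficient list, and keep any extra info as a string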
self.equ_type = self.set_equation_type(equ_type)
self.coeffs = []
self.extra = str(extra) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nth_order_const_coeff(*coeffs: List[Symbol], t: Symbol = t) -> Tuple[List[Symbol], Procedure]:\n\n # First, set up characteristic equation.\n char_eq_r, r = sympy.S.Zero, Dummy('r')\n\n for order, coeff in enumerate(coeffs[::-1]):\n char_eq_r += coeff * r ** order\n\n char_eq = Poly(char_eq_r, r)\n\n # Can't just call roots because it doesn't return rootof for unsolveable\n # polynomials.\n char_eq_roots = roots(char_eq, multiple=True)\n\n root_dict = defaultdict(int) # type: Dict[int, int]\n\n conjugate_roots = []\n for root in char_eq_roots:\n root_dict[root] += 1\n\n sols = []\n for root, mult in root_dict.items():\n for i in range(mult):\n if isinstance(root, RootOf):\n sols.append(t**i * exp(root*t))\n elif root.is_real:\n sols.append(t**i*exp(root*t))\n else:\n if root in conjugate_roots:\n continue\n reroot = re(root)\n imroot = im(root)\n conjugate_roots.append(conjugate(root))\n sols.append(t**i*exp(reroot*t) * sin(abs(imroot) * t))\n sols.append(t**i*exp(reroot*t) * cos(imroot * t))\n\n # collect roots for display\n p_roots = []\n count = 1\n for root, mult in root_dict.items():\n p_roots.append(Eq(Dummy('r_{}'.format(\n \",\".join([str(i) for i in range(count, count + mult)]))), root, evaluate=False))\n count += mult\n\n procedure = Procedure()\n procedure\\\n .text('Characteristic equation: ', nl=True)\\\n .eq(Eq(char_eq_r, 0, evaluate=False))\\\n .text('Roots: ')\\\n .equarr(p_roots)\\\n .text('General Solution: ', nl=True)\\\n .eq(Eq(Dummy('y'), to_general(sols)[0], evaluate=False))\n\n return sols, procedure",
"def findxcor(xarr, farr, swarr, sfarr, ws, dcoef=None, ndstep=20, best=False,\n inttype='interp', debug=False):\n\n # cross-correlate the spectral lines and the observed fluxes in order to\n # refine the solution\n try:\n nws = copy.deepcopy(ws)\n except:\n nws = WavelengthSolution.WavelengthSolution(\n ws.x, ws.wavelength, ws.model)\n\n # create the range of coefficents\n if dcoef is None:\n dcoef = ws.coef * 0.0 + 1.0\n\n dlist = mod_coef(ws.coef, dcoef, 0, ndstep)\n # loop through them and deteremine the best cofficient\n cc_arr = np.zeros(len(dlist), dtype=float)\n\n for i in range(len(dlist)):\n # set the coeficient\n nws.coef=dlist[i]\n \n # set the wavelegnth coverage\n warr = nws(xarr)\n\n # resample the artificial spectrum at the same wavelengths as the\n # observed spectrum\n asfarr = interpolate(\n warr, swarr, sfarr, type=inttype, left=0.0, right=0.0)\n\n # calculate the correlation value\n cc_arr[i] = ncor(farr, asfarr)\n #if debug: print cc_arr[i], \" \".join([\"%f\" % k for k in dlist[i]])\n\n # now set the best coefficients\n i = cc_arr.argmax()\n bcoef = dlist[i]\n nws.coef=bcoef\n if best:\n return nws\n\n # interpoloate over the values to determine the best value\n darr = np.array(dlist)\n for j in range(len(nws.coef)):\n if dcoef[j] != 0.0:\n i = cc_arr.argsort()[::-1]\n tk = np.polyfit(darr[:, j][i[0:5]], cc_arr[i[0:5]], 2)\n\n if tk[0] == 0:\n bval = 0\n else:\n bval = -0.5 * tk[1] / tk[0]\n\n # make sure that the best value is close\n if abs(bval - bcoef[j]) < 2 * dcoef[j] / ndstep:\n bcoef[j] = bval\n\n # coef=np.polyfit(dlist[:][j], cc_arr, 2)\n # nws.coef[j]=-0.5*coef[1]/coef[0]\n\n nws.coef=bcoef\n\n return nws",
"def integrate(equ):\n if \"x\" in equ:\n return polynomial_equation(equ)\n else:\n return constant_equation(equ)",
"def calculateElementCoefficients(self):\n #\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])",
"def coef_val():\n\n basepath = path.join(path.dirname(path.realpath('__file__')), 'data')\n fdata = basepath + path.sep + 'VAWTPolySurfaceCoef_pub.csv' # published coefficients from paper\n # fdata = basepath + path.sep + 'VAWTPolySurfaceCoef.csv' # polynomial surface fitting coefficients\n\n loc1 = np.zeros(10)\n loc2 = np.zeros(10)\n loc3 = np.zeros(10)\n spr1 = np.zeros(10)\n spr2 = np.zeros(10)\n skw1 = np.zeros(10)\n skw2 = np.zeros(10)\n scl1 = np.zeros(10)\n scl2 = np.zeros(10)\n scl3 = np.zeros(10)\n\n f = open(fdata)\n csv_f = csv.reader(f)\n\n i = 0\n for row in csv_f:\n if i != 0:\n loc1[i-1] = float(row[0])\n loc2[i-1] = float(row[1])\n loc3[i-1] = float(row[2])\n spr1[i-1] = float(row[3])\n spr2[i-1] = float(row[4])\n skw1[i-1] = float(row[5])\n skw2[i-1] = float(row[6])\n scl1[i-1] = float(row[7])\n scl2[i-1] = float(row[8])\n scl3[i-1] = float(row[9])\n i += 1\n\n f.close()\n\n return loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3",
"def function_poly(coeffs, t, x, y, lc, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, poly_params, eclipse = False):\n new_flux = model_poly(coeffs, t, x, y, coeffs_dict, coeffs_tuple, fix_coeffs, batman_params, poly_params, eclipse = eclipse)\n return lc - new_flux",
"def test_get_ext_coeffs(self, spec, order, coeff, expected):\n zeta, j = sp.symbols('zeta, j')\n\n # Substitutions needed\n test_subs = [(x_a[i], 1-order//2+i) for i in range(order//2)]\n test_subs += [(x_t, j), (x_b, zeta)]\n\n bcs = BoundaryConditions(spec, order)\n\n coeffs = get_ext_coeffs(bcs)[order//2]\n\n expected_coeff = sp.sympify(expected, locals={'zeta': zeta, 'j': j})\n\n generated_coeff = coeffs[E[coeff]].subs(test_subs)\n\n assert sp.simplify(generated_coeff - expected_coeff) == 0",
"def _func_pen(self, coeffs_ext):\n l_elastic_net = self.l_elastic_net\n eta = self.eta\n n_features = self.n_features\n coeffs = coeffs_ext[:n_features] - coeffs_ext[n_features:]\n return l_elastic_net * ((1. - eta) * coeffs_ext.sum()\n + 0.5 * eta * np.linalg.norm(coeffs) ** 2)",
"def _extract_coefficients(self, expr):\n\n theta_s = sp.Symbol('theta_s')\n\n N_fn = self.SRF.ncoefs + self.V.ncoefs - 1\n\n fn = []\n\n # find f_0 coefficient\n repl0 = dict([[sp.cos(theta_s), 0]])\n fn = fn + [expr.xreplace(repl0)]\n\n # find f_1 coefficient\n repl1 = dict([[sp.cos(theta_s)**i, 0] for i in list(range(N_fn, 0, -1))\n if i != 1] + [[sp.cos(theta_s), 1]])\n fn = fn + [expr.xreplace(repl1) - fn[0]]\n\n for n in np.arange(2, N_fn, dtype=int):\n repln = dict([[sp.cos(theta_s)**int(n), 1]])\n fn = fn + [(expr.xreplace(repln)).xreplace(repl0) - fn[0]]\n\n# # alternative way of extracting the coefficients:\n# theta_s = sp.Symbol('theta_s')\n# # collect terms with equal powers of cos(theta_s)\n# expr_sort = sp.collect(expr, sp.cos(theta_s), evaluate=False)\n#\n# # convert generated dictionary to list of coefficients\n# # the use of .get() is necessary for getting the dict-values since\n# # otherwise coefficients that are actually 0. would not appear\n# # in the list of fn-coefficients\n#\n# fn = [expr_sort.get(sp.cos(theta_s) ** n, 0.)\n# for n in range(self.SRF.ncoefs + self.V.ncoefs - 1)]\n\n return fn",
"def trans_coeffs(file_name, exci_info):\n ntrans_a = exci_info.nocc_a * exci_info.nvirt_a\n ntrans_b = exci_info.nocc_b * exci_info.nvirt_b\n ntrans_tot = ntrans_a + ntrans_b\n\n raw_array = read_gauss_rwf(file_name)\n # list of transition coefficients. Each element is 1 excitation\n exci_coeffs_add = []\n exci_coeffs_sub = []\n\n for i_ex in range(exci_info.nex):\n a_start = i_ex * ntrans_tot + 12\n a_end = i_ex * ntrans_tot + ntrans_a + 12\n b_start = i_ex * ntrans_tot + ntrans_a + 12\n b_end = i_ex * ntrans_tot + ntrans_tot + 12\n\n trans_a = raw_array[a_start:a_end]\n trans_b = raw_array[b_start:b_end]\n\n exci_coeffs_add.append(np.array([trans_a,trans_b]))\n\n for i_ex in range(exci_info.nex):\n n_skip = int((len(raw_array)-12-exci_info.nex)/2)\n a_start = i_ex * ntrans_tot + 12 + n_skip\n a_end = i_ex * ntrans_tot + ntrans_a + 12 + n_skip\n b_start = i_ex * ntrans_tot + ntrans_a + 12 + n_skip\n b_end = i_ex * ntrans_tot + ntrans_tot + 12 + n_skip\n\n trans_a = raw_array[a_start:a_end]\n trans_b = raw_array[b_start:b_end]\n\n exci_coeffs_sub.append(np.array([trans_a,trans_b]))\n\n ket_x_lis = []\n ket_y_lis = []\n\n for i,j in zip(exci_coeffs_add,exci_coeffs_sub):\n ket_x_lis.append((i[0]+j[0])/2)\n ket_y_lis.append((i[0]-j[0])/2)\n\n ket_x = np.array(ket_x_lis)\n ket_y = np.array(ket_y_lis)\n\n return ket_x, ket_y",
"def nircam_get_polynomial_forward(apName, siaf_aperture_definitions, coldfit_name_mapping, coldfit_source_data, makeplot=False, test=False, verbose=False):\n \n aperture_name_list = siaf_aperture_definitions['AperName'].tolist()\n\n for row, name in enumerate(aperture_name_list):\n if name == apName:\n r = row\n\n # read in hardcoded dictionary\n apSys = coldfit_name_mapping[apName]\n xref = siaf_aperture_definitions['XDetRef'][r]\n yref = siaf_aperture_definitions['YDetRef'][r]\n if xref == '':\n xref = 1024.5 # Allow for COMPOUND types\n else:\n xref = float(xref) # with no pixel information\n if yref == '':\n yref = 1024.5\n else:\n yref = float(yref)\n\n print(apName, xref, yref)\n\n order = 5 # polynomial order\n terms = (order + 1) * (order + 2) // 2 # number of poly coeffs\n A = np.zeros((terms))\n B = np.zeros((terms))\n\n part1 = False\n part2 = False\n\n # read parameters from cold_fit_[] file\n for line in coldfit_source_data:\n column = line.split(',')\n modelname = column[0].strip()\n if modelname==apSys['pixels_to_mm']:\n a0 = float(column[7])\n a1 = float(column[9])\n a2 = float(column[8])\n b0 = float(column[28])\n b1 = float(column[30])\n b2 = float(column[29])\n part1 = True\n if verbose:\n print('a', a0, a1, a2)\n print('b', b0, b1, b2)\n\n # find transformation to OTESKY\n if modelname==apSys['mm_to_degrees']:\n for i in range(terms):\n A[i] = float(column[i + 7])\n B[i] = float(column[i + 28])\n (A1, B1) = polynomial.reorder(A, B)\n part2 = True\n if verbose:\n print(' Before combining')\n print('A1')\n polynomial.print_triangle(A1)\n print('B1')\n polynomial.print_triangle(B1)\n\n if not (part1 and part2):\n print('Incomplete Transform')\n return\n\n # Combine transformations\n delta = a1 * b2 - a2 * b1\n alpha = (b2 * a0 - a2 * b0) / delta\n beta = (-b1 * a0 + a1 * b0) / delta\n AT = polynomial.transform_coefficients(A1, a1, a2, b1, b2)\n BT = polynomial.transform_coefficients(B1, a1, a2, b1, b2)\n ATS = polynomial.shift_coefficients(AT, alpha, beta)\n BTS = polynomial.shift_coefficients(BT, alpha, beta)\n\n # Generate polynomials in terms of V2V3 in arcsec\n AF = 3600 * ATS\n BF = -3600 * BTS\n BF[0] = BF[0] - 468.0\n\n if makeplot:\n # Plot axes in V2V3 coords\n (V20, V30) = (AF[0], BF[0])\n print('Bottom Left Corner', V20, V30)\n V2x = polynomial.poly(AF, 2048.0, 0.0)\n V3x = polynomial.poly(BF, 2048.0, 0.0)\n V2y = polynomial.poly(AF, 0.0, 2048.0)\n V3y = polynomial.poly(BF, 0.0, 2048.0)\n V2opp = polynomial.poly(AF, 2048.0, 2048.0)\n V3opp = polynomial.poly(BF, 2048.0, 2048.0)\n V2c = polynomial.poly(AF, 1024.0, 1024.0)\n V3c = polynomial.poly(BF, 1024.0, 1024.0)\n print('Center', V2c, V3c)\n\n P.figure(1)\n # P.clf()\n P.plot((V2x, V20, V2y), (V3x, V30, V3y))\n P.plot((V2x, V2opp, V2y), (V3x, V3opp, V3y), linestyle='dashed')\n P.plot((V2c), (V3c), 'rx', ms=10.0)\n P.grid(True)\n P.axis('equal')\n P.text(V2x, V3x, 'X')\n P.text(V2y, V3y, 'Y')\n P.text(V2c, V3c, apName)\n P.title('NIRCam')\n P.xlabel('<---V2')\n P.ylabel('V3 --->')\n # V2 to the left\n (l, r) = P.xlim()\n P.xlim(r, l)\n\n # Shift to reference point (xref=1024.5, yref=1024.5 hardcoded for COMPOUND apertures. 
OK?)\n AFS = polynomial.shift_coefficients(AF, xref, yref)\n BFS = polynomial.shift_coefficients(BF, xref, yref)\n\n if test:\n xy = input('x y positions ')\n x = float(xy.split(',')[0])\n y = float(xy.split(',')[1])\n # Two step calculation\n xm = a0 + a1 * x + a2 * y\n ym = b0 + b1 * x + b2 * y\n xan = polynomial.poly(A1, xm, ym, 5)\n yan = polynomial.poly(B1, xm, ym, 5)\n v2 = 3600 * xan\n v3 = -3600 * (yan + 0.13)\n print('\\n Two step forward calculation')\n print(x, y, xm, ym, xan, yan)\n print(v2, v3)\n\n v21 = polynomial.poly(AF, x, y, 5)\n v31 = polynomial.poly(BF, x, y, 5)\n print('One step')\n print(v21, v31)\n\n xr = x - 1024.5\n yr = y - 1024.5\n v2r = polynomial.poly(AFS, xr, yr, 5)\n v3r = polynomial.poly(BFS, xr, yr, 5)\n print('Shifted')\n print(v2r, v3r)\n\n return (AFS, BFS)",
"def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")",
"def _get_coeffs(self,kind='real', convention=1, swithchcs=0):\n if kind=='real': \n return self.coeffs\n elif kind=='complex':\n return SHrtoc(self.coeffs, convention=convention, switchcs=switchcs)",
"def coefficients_from_Weierstrass_polynomial(f):\n R = f.parent()\n cubic_variables = [x for x in R.gens() if f.degree(x) == 3]\n quadratic_variables = [y for y in R.gens() if f.degree(y) == 2]\n try:\n x = cubic_variables[0]\n y = quadratic_variables[0]\n except IndexError:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n a1 = a2 = a3 = a4 = a6 = 0\n x3 = y2 = None\n for coeff, mon in f:\n if mon == x**3:\n x3 = coeff\n elif mon == x**2:\n a2 = coeff\n elif mon == x:\n a4 = coeff\n elif mon == 1:\n a6 = coeff\n elif mon == y**2:\n y2 = -coeff\n elif mon == x*y:\n a1 = -coeff\n elif mon == y:\n a3 = -coeff\n else:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n if x3 != y2:\n raise ValueError('the coefficient of x^3 and -y^2 must be the same')\n elif x3 != 1:\n a1, a2, a3, a4, a6 = a1/x3, a2/x3, a3/x3, a4/x3, a6/x3\n return [a1, a2, a3, a4, a6]",
"def y(self):\n #print(xdb.xray_lines(self.element)[self.line].energy/1000)\n #self.output_params={}\n #print(self.getBulkCon(self.element,self.botchem,self.botden))\n x = self.x + self.qoff\n if not self.__fit__:\n self.output_params['scaler_parameters']={}\n return self.fluCalFun(x)\n #return self.x",
"def evaluate(self,coeffs,evalpts):\n a1,a2,a3,A0,E0,G0,n = coeffs\n x = asarray(evalpts) #XXX: requires a numpy.array\n return (a1 + a2*x + a3*x*x + A0 * ( G0/(2*pi) )/( (x-E0)*(x-E0)+(G0/2)*(G0/2) ))/n",
"def print_coeffs(f,model,v_coeff=None,w_coeff=None,wrad_coeff=None,timezero=None,final=False):\n if model.ncosF>0:\n if final: print >>f,\"===== final v_coeff =====\"\n else: print >>f,\"===== v_coeff =====\"\n for i,val in enumerate(v_coeff):\n print >>f, \"%8d %13.5e\" %(i,val)\n if model.ncosD>0:\n if final: print >>f,\"===== final w_coeff =====\"\n else: print >>f,\"===== w_coeff =====\"\n print >>f, \"%8d %13.5e\" %(0,w_coeff[0]+model.wunit) # only the first needs to be shifted\n for i,val in enumerate(w_coeff[1:]):\n print >>f, \"%8d %13.5e\" %(i+1,val)\n if timezero is not None:\n if final: print >>f,\"===== final timezero =====\"\n else: print >>f,\"===== timezero =====\"\n print >>f, \"%13.5e\" %(timezero)\n if wrad_coeff is not None:\n if model.ncosDrad > 0:\n if final: print >>f,\"===== final wrad_coeff =====\"\n else: print >>f,\"===== wrad_coeff =====\"\n print >>f, \"%8d %13.5e\" %(0,wrad_coeff[0]+model.wradunit) # only the first needs to be shifted\n for i,val in enumerate(wrad_coeff[1:]):\n print >>f, \"%8d %13.5e\" %(i+1,val)\n print >>f, \"=\"*10",
"def nircam_get_polynomial_inverse(apName, siaf_aperture_definitions, coldfit_name_mapping, coldfit_source_data, verbose=False):\n\n aperture_name_list = siaf_aperture_definitions['AperName'].tolist()\n\n if verbose:\n print('Running inverse ...')\n for row, name in enumerate(aperture_name_list):\n if name == apName:\n if verbose:\n print('Found aperture {}'.format(apName))\n r = row\n apSys = coldfit_name_mapping[apName]\n xref = siaf_aperture_definitions['XDetRef'][r]\n yref = siaf_aperture_definitions['YDetRef'][r]\n if xref == '':\n xref = 1024.5 # Allow for COMPOUND types\n else:\n xref = float(xref) # with no pixel information\n if yref == '':\n yref = 1024.5\n else:\n yref = float(yref)\n\n order = 5\n terms = (order + 1) * (order + 2) // 2\n C = np.zeros((terms))\n D = np.zeros((terms))\n\n part1 = False\n part2 = False\n for line in coldfit_source_data: # coeffs read in during initialization\n column = line.split(',')\n modelname = column[0].strip()\n if modelname==apSys['mm_to_pixels']: # linear transformation\n c0 = float(column[7])\n c1 = float(column[9])\n c2 = float(column[8])\n d0 = float(column[28])\n d1 = float(column[30])\n d2 = float(column[29])\n if verbose:\n print('c', c0, c1, c2)\n print('d', d0, d1, d2)\n part1 = True\n\n if modelname==apSys['degrees_to_mm']: # Polynomial\n for i in range(terms):\n C[i] = float(column[i + 7])\n D[i] = float(column[i + 28])\n (C1, D1) = polynomial.reorder(C, D)\n\n if verbose:\n print('C1')\n polynomial.print_triangle(C1)\n print('D1')\n polynomial.print_triangle(D1)\n\n # Combination polynomials CF and DF transform\n # from XAN,YAN directly to x,y pixels.\n\n CS = polynomial.shift_coefficients(C1, 0.0, -0.13)\n DS = polynomial.shift_coefficients(D1, 0.0, -0.13)\n CV = np.zeros((terms))\n DV = np.zeros((terms))\n\n k = 0\n for i in range(order + 1):\n for j in range(i + 1):\n CV[k] = (-1) ** j * CS[k] / 3600.0 ** i\n DV[k] = (-1) ** j * DS[k] / 3600.0 ** i\n k += 1\n part2 = True\n\n\n if part1 and part2:\n CVF = c1 * CV + c2 * DV\n CVF[0] = CVF[0] + c0\n DVF = d1 * CV + d2 * DV\n DVF[0] = DVF[0] + d0\n # Shift to reference position\n CVF[0] = CVF[0] - xref\n DVF[0] = DVF[0] - yref\n else:\n print('Incomplete transform')\n\n return (CVF, DVF)",
"def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)",
"def transform_series_coeff(g, fc, R0=50, bw_ratio=0.1, flt_type='lowpass'):\r\n lst = []\r\n if flt_type == 'lowpass':\r\n val = R0*g/(2*math.pi*fc)\r\n lst.append({'ref_des':'L', 'value':val}) \r\n\r\n elif flt_type == 'highpass': \r\n val = 1.0/(R0*2*math.pi*fc*g) \r\n lst.append({'ref_des':'C', 'value':val}) \r\n\r\n elif flt_type == 'bandpass':\r\n val = R0*g/(2*math.pi*fc*bw_ratio)\r\n lst.append({'ref_des':'L', 'value':val}) \r\n val = bw_ratio/(R0*2*math.pi*fc*g)\r\n lst.append({'ref_des':'C', 'value':val}) \r\n \r\n elif flt_type == 'bandstop':\r\n val = R0*g*bw_ratio/(2*math.pi*fc)\r\n lst.append({'ref_des':'L', 'value':val}) \r\n val = 1.0/(R0*g*2*math.pi*fc*bw_ratio)\r\n lst.append({'ref_des':'C', 'value':val}) \r\n else:\r\n raise ValueError(\"not a valid filter type\")\r\n return lst",
"def printPolyCoeffs(lam) :\n ell = len(lam)\n useFormat = \"2.6e\"\n count = 0\n def printLine(s, count) :\n if lam[count] < 0 :\n s = s + 3 * \" \"\n else :\n s = s + 4 * \" \"\n s = s + \"{0:\" + useFormat + \"}\"\n print(s . format(lam[count]))\n count = count + 1\n return count\n if ell >= 1 :\n count = printLine(\"x0y0\", count)\n if ell >= 3 :\n count = printLine(\"x1y0\", count)\n count = printLine(\"x0y1\", count)\n if ell >= 6 :\n count = printLine(\"x2y0\", count)\n count = printLine(\"x1y1\", count)\n count = printLine(\"x0y2\", count)\n if ell >= 10 :\n count = printLine(\"x3y0\", count)\n count = printLine(\"x2y1\", count)\n count = printLine(\"x1y2\", count)\n count = printLine(\"x0y3\", count)\n if ell >= 15 :\n count = printLine(\"x4y0\", count)\n count = printLine(\"x3y1\", count)\n count = printLine(\"x2y2\", count)\n count = printLine(\"x1y3\", count)\n count = printLine(\"x0y4\", count)\n if ell >= 21 :\n count = printLine(\"x5y0\", count)\n count = printLine(\"x4y1\", count)\n count = printLine(\"x3y2\", count)\n count = printLine(\"x2y3\", count)\n count = printLine(\"x1y4\", count)\n count = printLine(\"x0y5\", count)\n if ell >= 28 :\n count = printLine(\"x6y0\", count)\n count = printLine(\"x5y1\", count)\n count = printLine(\"x4y2\", count)\n count = printLine(\"x3y3\", count)\n count = printLine(\"x2y4\", count)\n count = printLine(\"x1y5\", count)\n count = printLine(\"x0y6\", count)\n if ell >= 36 :\n count = printLine(\"x7y0\", count)\n count = printLine(\"x6y1\", count)\n count = printLine(\"x5y2\", count)\n count = printLine(\"x4y3\", count)\n count = printLine(\"x3y4\", count)\n count = printLine(\"x2y5\", count)\n count = printLine(\"x1y6\", count)\n count = printLine(\"x0y7\", count)\n if (ell > 36) or (ell < 1) :\n raise ValueError(\"Polynomial degree less than or equal to 7, please.\")",
"def oxy_dict(calib, P, K, T, S, V):\n\n \"\"\"Assumes all are arrays, or none are arrays. Need way to test for them. \"\"\"\n try:\n oxygen = []\n for P_x, K_x, T_x, S_x, V_x in zip(P, K, T, S, V):\n temp = (calib['Soc'] * (V_x + calib['offset'])\n * (1.0 + calib['A'] * T_x + calib['B'] * math.pow(T_x,2) + calib['C'] * math.pow(T_x,3) )\n * OxSol(T_x,S_x)\n * math.exp(calib['E'] * P_x / K_x)) #foo\n temp = round(temp,4)\n oxygen.append(temp)\n #Single mode.\n except:\n oxygen = (calib['Soc'] * (V + calib['offset'])\n * (1.0 + calib['A'] * T + calib['B'] * math.pow(T,2) + calib['C'] * math.pow(T,3) )\n * OxSol(T,S)\n * math.exp(calib['E'] * P / K))\n return oxygen",
"def __getPressureCalibrationCoefficients(self):\n src13 = self.read_byte_data(self.address, 0x13)\n src14 = self.read_byte_data(self.address, 0x14)\n src15 = self.read_byte_data(self.address, 0x15)\n src16 = self.read_byte_data(self.address, 0x16)\n src17 = self.read_byte_data(self.address, 0x17)\n src18 = self.read_byte_data(self.address, 0x18)\n src19 = self.read_byte_data(self.address, 0x19)\n src1A = self.read_byte_data(self.address, 0x1A)\n src1B = self.read_byte_data(self.address, 0x1B)\n src1C = self.read_byte_data(self.address, 0x1C)\n src1D = self.read_byte_data(self.address, 0x1D)\n src1E = self.read_byte_data(self.address, 0x1E)\n src1F = self.read_byte_data(self.address, 0x1F)\n src20 = self.read_byte_data(self.address, 0x20)\n src21 = self.read_byte_data(self.address, 0x21)\n c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)\n c00 = getTwosComplement(c00, 20)\n c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17\n c10 = getTwosComplement(c10, 20)\n c20 = (src1C << 8) | src1D\n c20 = getTwosComplement(c20, 16)\n c30 = (src20 << 8) | src21\n c30 = getTwosComplement(c30, 16)\n c01 = (src18 << 8) | src19\n c01 = getTwosComplement(c01, 16)\n c11 = (src1A << 8) | src1B\n c11 = getTwosComplement(c11, 16)\n c21 = (src1E < 8) | src1F\n c21 = getTwosComplement(c21, 16)\n return c00, c10, c20, c30, c01, c11, c21",
"def coefC(x0,y0,x1,y1):\n return (x1*y0-x0*y1)/(x1-x0)",
"def IntegratePolynomialPartLogExt(poly, fieldTower):\n #print(poly)\n #print(poly.degree)\n #print(poly.fieldTower)\n if poly == 0 or poly.isZero():\n return Int.Integral()\n red = poly.reduceToLowestPossibleFieldTower()\n if red.fieldTower.towerHeight<fieldTower.towerHeight:\n return Integrate(red)\n p = poly.getCoefficients()\n l = len(p)-1\n q = [ZERO]*(len(p)+1)\n b = [ZERO]*(len(p)+1)\n d = [ZERO]*(len(p)+1)\n \n \n u = fieldTower.getLastExtension().argFunction # last fieldExtension with T=log(u)\n log_diff_u = u.logDifferentiate()#u.differentiate()/u # u'/u\n \n # p_l *T^l + p_(l-1)*T^(l-1) + ... + p_0 = (q_(l+1)*T^(l+1) + q_l*T^l + ... + q_0)' + (c_1 *v_1'/v_i + ... + c_m *v_m'/v_m)\n # (q_i*T^i)' = q_i'*T^i + q_i*i*T'*T^(i-1)\n # -> 0 = q_(l+1)'\n # p_l = (l+1)*q_(l+1)*T' + q_l'\n # ...\n # p_0 = q_1*T' + (qe_0)', qe_0 = q_0+(c_1 *log(v_1) + ... + c_m *log(v_m))\n for i in range(l,-1,-1):\n # q_(i+1) = d_(i+1) + b_(i+1)\n # integral(p_i-(i+1)*d_(i+1)*T') = l*b_(i+1)*T' + q_i\n #i = Rational.fromFloat(i)\n integrand = p[i]+(-ONE)*(i+1)*d[i+1]*log_diff_u\n if type(integrand)==float or type(integrand)==int:\n integrand = Rational.fromFloat(integrand)\n int_reduced = integrand.reduceToLowestPossibleFieldTower()\n if int_reduced!=None:\n integrand = int_reduced\n P_i = Integrate(integrand)\n if i>0:\n IntegratePolynomialPartLogExtCheckIntegralConditions(P_i, fieldTower)\n else:\n if P_i==None:\n raise Int.IntegralNotElementaryError()\n \n prev = fieldTower.prevTower()\n #logs = P_i.getNewLogExpressionsInFieldTower(prev,fieldTower)\n logTerm = P_i.getLogExpression(u)\n otherLogs = [log for log in P_i.logExpressions if not log==logTerm]#in logs]\n ci = ZERO if logTerm==None else logTerm.factor # integral = P_i = c_i*T+d_i, d_i = P_i\\logs\n d[i] = Int.Integral(poly_rationals=P_i.poly_rational_partExpressions,logs=otherLogs,rootSums=P_i.rootSums).asFunction()\n b[i+1] = ci/(i+1)\n q[i+1] = d[i+1]+b[i+1]\n \n #integrand = p[0]+(-1)*d[1]*log_diff_u\n #P0 = Integrate(integrand)\n #if P0==None:\n # raise Int.IntegralNotElementaryError()\n #b1 = 0\n #for log in P0.logExpressions:\n # if log.argFunction==u:\n # b1 = log.factor\n \n if b[1]==0:\n q_0 = P_i.asFunction()\n else:\n q_0 = (P_i+Int.Integral(logs=[Int.LogFunction(u,(-1)*b[1])])).asFunction()\n q[0] = q_0\n \n integralPoly = Pol.Polynomial(variable=fieldTower.getLastVariable())\n for i in range(l+1,-1,-1):\n integralPoly.setCoefficient(i,q[i], callUpdates=False)\n integralPoly.updateCoefficientsAll()\n \n return Int.Integral(poly_rationals=[integralPoly])",
"def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)",
"def test_coeff_convert():\n assert np.allclose(\n fcoeffs2coeffs(np.array([1.0, 2, 1, 1])), P.polymul([1, 1, 2], [1, 1])\n )\n assert np.allclose(\n fcoeffs2coeffs(np.array([1.0, 2, 1.2])), P.polymul([1, 1, 2], 1.2)\n )\n assert np.allclose(fcoeffs2coeffs(np.array([1.0, 2])), P.polymul([1, 1], 2))",
"def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh",
"def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])",
"def calculate_coefficients(self):\n for i in range(0, self.nz):\n zno = i * self.dz\n self.z[0][i] = zno\n plot_eccentricity_error = False\n position = -1\n for j in range(0, self.ntheta):\n # fmt: off\n self.gama[i][j] = j * self.dtheta + (np.pi - self.beta)\n [radius_external, self.xre[i][j], self.yre[i][j]] = \\\n self.external_radius_function(self.gama[i][j])\n [radius_internal, self.xri[i][j], self.yri[i][j]] = \\\n self.internal_radius_function(zno, self.gama[i][j])\n self.re[i][j] = radius_external\n self.ri[i][j] = radius_internal\n\n w = self.omega * self.ri[i][j]\n\n k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *\n (np.log(self.ri[i][j]) - 1 / 2)) / (self.ri[i][j] ** 2 - self.re[i][j] ** 2)\n\n self.c1[i][j] = (1 / (4 * self.viscosity)) * ((self.re[i][j] ** 2 * np.log(self.re[i][j]) -\n self.ri[i][j] ** 2 * np.log(self.ri[i][j]) +\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) *\n (k - 1)) - 2 * self.re[i][j] ** 2 * (\n (np.log(self.re[i][j]) + k - 1 / 2) * np.log(\n self.re[i][j] / self.ri[i][j])))\n\n self.c2[i][j] = (- self.ri[i][j] ** 2) / (8 * self.viscosity) * \\\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2 -\n (self.re[i][j] ** 4 - self.ri[i][j] ** 4) /\n (2 * self.ri[i][j] ** 2)) +\n ((self.re[i][j] ** 2 - self.ri[i][j] ** 2) /\n (self.ri[i][j] ** 2 *\n np.log(self.re[i][j] / self.ri[i][j]))) *\n (self.re[i][j] ** 2 * np.log(self.re[i][j] / self.ri[i][j]) -\n (self.re[i][j] ** 2 - self.ri[i][j] ** 2) / 2))\n\n self.c0w[i][j] = (- w * self.ri[i][j] *\n (np.log(self.re[i][j] / self.ri[i][j]) *\n (1 + (self.ri[i][j] ** 2) / (self.re[i][j] ** 2 - self.ri[i][j] ** 2)) - 1 / 2))\n # fmt: on\n if not plot_eccentricity_error:\n if abs(self.xri[i][j]) > abs(self.xre[i][j]) or abs(\n self.yri[i][j]\n ) > abs(self.yre[i][j]):\n plot_eccentricity_error = True\n position = i\n if plot_eccentricity_error:\n self.plot_eccentricity(position)\n sys.exit(\n \"Error: The given parameters create a rotor that is not inside the stator. \"\n \"Check the plotted figure and fix accordingly.\"\n )"
] | [
"0.5734576",
"0.56887656",
"0.5681322",
"0.5651976",
"0.56423664",
"0.5576589",
"0.5528677",
"0.5461771",
"0.5373789",
"0.5370976",
"0.5368531",
"0.5363878",
"0.5342567",
"0.5324895",
"0.5313149",
"0.52953506",
"0.5280881",
"0.5260313",
"0.5219243",
"0.52093476",
"0.5208789",
"0.5201552",
"0.51979464",
"0.51603365",
"0.5154393",
"0.51533365",
"0.5150926",
"0.5139898",
"0.5139897",
"0.5138385"
] | 0.57912767 | 0 |
This uses the header and tries out all possible functions for getting the wavelength solution coefficients; returns a dictionary with keywords in ['linear','ctype','crvl','wcs','w0','co_0','wv_0','spectre'] | 
# coefficient choices
cc = {}
#========================================================================#
# linear dispersion
cc['linear'] = coeff_basic_linear(pyfits_header)
#========================================================================#
# using keywords ctype, crval, crpix, cdelt
cc['ctype1'] = coeff_from_ctype1(pyfits_header)
#========================================================================#
# linear dispersion using keywords linintrp, crvl1_?, cdlt1_?
# from IRAF, order by order !! do I need to look up what the 1_ means?
# some of these are doubled by WAT0_001 stuff
cc['crvl'] = coeff_from_crvl(pyfits_header)
# if preferred_disp == 'any' or preferred_disp == 'linear' or preferred_disp == 'crvl' or preferred_disp == 'makee linear':
#========================================================================#
# IRAF WCS keywords WAT?_001
#if preferred_disp == 'any' or preferred_disp == 'IRAF_WCS':
cc['wcs'] = coeff_from_wcs(pyfits_header,apply_WCS_rv)
#========================================================================#
# linear dispersion for keywords w0 and wpc
cc['w0'] = coeff_from_w0(pyfits_header)
#if preferred_disp == 'any' or preferred_disp == 'linear' or preferred_disp == 'w0':
#========================================================================#
# MAKEE type dispersion using keywords co_0_? and co_4_?
# I'm not sure what type of coefficients these are !!
#cc['co_0'] = coeff_from_makee_c0(pyfits_header)
# if preferred_disp == 'any' or preferred_disp == 'makee' or preferred_disp == 'co_0':
#========================================================================#
    # MAKEE coefficients using keywords wv_0_? and wv_4_?
cc['wv_0'] = coeff_from_makee_wv(pyfits_header)
#if preferred_disp == 'any' or preferred_disp == 'makee' or preferred_disp == 'wv_0':
#========================================================================#
# spectre type dispersion
cc['spectre'] = coeff_from_SPECTRE(pyfits_header)
#if preferred_disp == 'any' or preferred_disp == 'spectre':
#========================================================================#
#========================================================================#
if output == 'all': return cc
return resolve_wlsoln_coeffs(cc,preferred) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret",
"def prep_hd(header,phi_c,lambda_c,nx,ny,dx,dy):\n header_out = {}\n\n # Keywords to get from original header\n keys_hd = ['TELESCOP', 'INSTRUME', 'WAVELNTH', 'CAMERA','DATE',\n 'DATE_S','DATE-OBS','T_OBS','T_REC','TRECEPOC',\n 'TRECSTEP','TRECUNIT','HARPNUM','DSUN_OBS','DSUN_REF',\n 'RSUN_REF','CRLN_OBS','CRLT_OBS','CAR_ROT','OBS_VR',\n 'OBS_VW','OBS_VN','RSUN_OBS','QUALITY','QUAL_S','QUALLEV1']\n\n for key in keys_hd:\n header_out[key] = header[key]\n\n # Add new keywords\n header_out['NAXIS'] = 2\n header_out['NAXIS1'] = nx\n header_out['NAXIS2'] = ny\n\n header_out['CUNIT1'] = 'degree'\n header_out['CUNIT2'] = 'degree'\n\n header_out['CRPIX1'] = (nx - 1) / 2 + 1\n header_out['CRPIX2'] = (ny - 1) / 2 + 1\n header_out['CRVAL1'] = phi_c\n header_out['CRVAL2'] = lambda_c\n header_out['CDELT1'] = dx\n header_out['CDELT2'] = dy\n header_out['CTYPE1'] = 'CRLN-CEA'\n header_out['CTYPE2'] = 'CRLT-CEA'\n header_out['CROTA2'] = 0.0\n\n header_out['WCSNAME'] = 'Carrington Heliographic'\n header_out['BUNIT'] = 'Mx/cm^2'\n\n return header_out",
"def get_aperture_coeffs_in_header(head):\n\n coeffs = {}\n for key, value in head.items():\n exp = '^GAMSE TRACE CHANNEL [A-Z] APERTURE \\d+ COEFF \\d+$'\n if re.match(exp, key) is not None:\n g = key.split()\n channel = g[3]\n aperture = int(g[5])\n icoeff = int(g[7])\n if (channel, aperture) not in coeffs:\n coeffs[(channel, aperture)] = []\n if len(coeffs[(channel, aperture)]) == icoeff:\n coeffs[(channel, aperture)].append(value)\n return coeffs",
"def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. +0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. 
+ 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, 
a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n 
self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)",
"def get_calib_from_header(header):\n\n prefix = 'HIERARCH GAMSE WLCALIB '\n\n xorder = header[prefix+'XORDER']\n yorder = header[prefix+'YORDER']\n\n coeff = np.zeros((yorder+1, xorder+1))\n for j, i in itertools.product(range(yorder+1), range(xorder+1)):\n coeff[j,i] = header[prefix+'COEFF {:d} {:d}'.format(j, i)]\n\n calib = {\n 'coeff': coeff,\n 'npixel': header[prefix+'NPIXEL'],\n 'k': header[prefix+'K'],\n 'offset': header[prefix+'OFFSET'],\n 'std': header[prefix+'STDDEV'],\n 'nuse': header[prefix+'NUSE'],\n 'ntot': header[prefix+'NTOT'],\n# 'identlist': calibwindow.identlist,\n 'window_size': header[prefix+'WINDOW_SIZE'],\n 'xorder': xorder,\n 'yorder': yorder,\n 'maxiter': header[prefix+'MAXITER'],\n 'clipping': header[prefix+'CLIPPING'],\n 'q_threshold': header[prefix+'Q_THRESHOLD'],\n 'direction': header[prefix+'DIRECTION'],\n }\n return calib",
"def get_sip_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n a_order = int(header.get('A_ORDER', 0))\n b_order = int(header.get('B_ORDER', 0))\n ac = np.matrix(np.zeros((a_order+1, a_order+1), dtype=np.float64))\n bc = np.matrix(np.zeros((b_order+1, b_order+1), dtype=np.float64))\n for m in range(a_order+1):\n for n in range(0, a_order+1-m):\n ac[m, n] = header.get('A_%d_%d' % (m, n), 0.0)\n for m in range(b_order+1):\n for n in range(0, b_order+1-m):\n bc[m, n] = header.get('B_%d_%d' % (m, n), 0.0)\n return cd, ac, bc",
"def wavelength_solution(file_name):\n file_data = read_file(file_name)\n header_data = file_data[0]\n image_data = file_data[1]\n\n range_begin = header_data['CRVAL3']\n pixel_begin = header_data['CRPIX3']\n step_size = header_data['CD3_3']\n steps = len(image_data)\n range_end = range_begin + steps * step_size\n return {'begin': range_begin, 'end': range_end, 'steps': steps}",
"def getCoeff(self, path, prefix, verbose = True):\n\n fpath = path + prefix + \".coeff\"\n # print 'get coeff path', fpath\n\n if not os.path.isfile(fpath):\n if verbose: print \".coeff file not found - please check!\"\n return False\n\n with open(fpath, \"rb\") as f:\n s = np.fromfile(f, count=4, dtype=np.int32)\n NFILT, NTEMP, NZ, NOBJ = s[0], s[1], s[2], s[3]\n coeffs = np.fromfile(f, count = NOBJ*NTEMP, dtype = np.double).reshape((NOBJ, NTEMP))\n izbest = np.fromfile(f, count = NOBJ, dtype = np.int32)\n tnorm = np.fromfile(f, count = NTEMP, dtype = np.double)\n\n keys = ['NFILT','NTEMP','NZ','NOBJ','coeffs','izbest','tnorm']\n values = [NFILT, NTEMP, NZ, NOBJ, coeffs, izbest, tnorm]\n\n if verbose: print \".coeff file found and read in correctly!\"\n return dict(zip(keys, values))",
"def ogip_dictionary_arf():\n \"\"\"\n this function returns the required and optional keywords and columns\n as defined by OGIP 92-002 and 92-002a\n \"\"\"\n global status\n global REPORT\n\n \"\"\"\n FOR the ARF file:\n \"\"\"\n \"\"\"\n Define REQUIRED Keywords for SPECRESP EXTENSION (note: EXTNAME is SPECRESP)\n \"\"\"\n reqkeys = ['TELESCOP', 'INSTRUME']\n reqkeys.append('FILTER')\n reqkeys.append('CHANTYPE[PHA|PI]')\n reqkeys.append('DETCHANS')\n reqkeys.append('HDUCLASS[OGIP]')\n reqkeys.append('HDUCLAS1[RESPONSE]')\n reqkeys.append('HDUCLAS2[SPECRESP]')\n reqkeys.append('HDUVERS[1.1.0]')\n reqkeys.append('TLMIN*')\n reqkeys.append('NUMGRP')\n reqkeys.append('NUMELT')\n reqkeys.append('CCLS0001[CPF]')\n reqkeys.append('CCNM0001[SPECRESP]')\n reqkeys.append('CDTP0001[DATA]')\n reqkeys.append('CVSD0001')\n reqkeys.append('CVST0001')\n reqkeys.append('CDES0001')\n\n \"\"\"\n Define recommended Keywords\n \"\"\"\n optkeys = ['PHAFILE']\n optkeys.append('LO_THRES') # minimum probability threshold in matrix (values < this are set to 0)\n optkeys.append('HDUCLAS3[REDIST|DETECTOR|FULL]') # required if channel numbering doesn't start at 1\n optkeys.append('RMFVERSN[1992A]')\n optkeys.append('HDUVERS1[1.1.0]')\n optkeys.append('HDUVERS2[1.2.0]')\n\n \"\"\"\n Define Required Columns\n \"\"\"\n reqcols = ['ENERG_LO'] # lower energy bound of bin (keV)\n reqcols.append('ENERG_HI') # upper energy bound of bin (keV); generally ENERG_LO(J) = ENERG_HI(J-1)\n reqcols.append('SPECRESP') # the \"effective area\"\n\n\n \"\"\"\n Define Optional Columns\n \"\"\"\n optcols = [] # dispersion order for grating data\n\n specresp = {'KEYWORDS':{'REQUIRED':reqkeys,'RECOMMENDED':optkeys}, 'COLUMNS':{'REQUIRED':reqcols,'RECOMMENDED':optcols}}\n\n extns={'REQUIRED':['SPECRESP'],'OPTIONAL':[]}\n #\n # create structure for the ARF file\n #\n ogip = {'EXTENSIONS':extns,\n 'SPECRESP':specresp,\n 'REFERENCE':'OGIP/92-002',\n 'REFURL':'https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/ofwg_recomm.html',\n 'REFTITLE':'The Calibration Requirements for Spectral Analysis'}\n\n return ogip",
"def read_spectral_k(filename=\"tc_dos_l.dat\"):\n # column headers for the data \n #tcdosl_labels = [\n # \"wavelength\",\n # \"k_xx_raw\",\"k_xx_smooth\",\n # \"k_yy_raw\",\"k_yy_smooth\",\n # \"k_zz_raw\",\"k_zz_smooth\"]\n\n tcdosl_labels = [\n \"wavelength\",\n \"k_xx_raw\",\"k_yy_raw\",\"k_zz_raw\",\n \"k_xx_smooth\",\"k_yy_smooth\",\"k_zz_smooth\"]\n\n def subselect_table_block(i_start,lines):\n i = i_start + 1\n\n table = []\n while(lines[i].strip() != \"\"):\n args = lines[i].split()\n args = [arg.strip() for arg in args]\n args = [float(arg) for arg in args]\n table.append(args)\n i += 1 \n return np.array(table)\n\n line = None # initialize\n with open(filename,'r') as f:\n lines = f.readlines()\n lines = [s.strip() for s in lines]\n\n temperatures = []\n tcdosl_dict = OrderedDict()\n\n for il,line in enumerate(lines):\n if line.startswith('# Temp:'):\n args = line.split(':')\n T = int(float(args[1].strip()))\n temperatures.append(T)\n tcdosl_dict[T] = subselect_table_block(il,lines)\n\n tcdosl_df_dict = OrderedDict()\n for temp in temperatures:\n tcdosl_df_dict[temp] = pd.DataFrame(\n copy.deepcopy(tcdosl_dict[temp]),\n columns=list(tcdosl_labels))\n\n return {k:v.copy() for k,v in tcdosl_df_dict.items()}",
"def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs",
"def readin (filename, hdu=0, non_std_fits=False,\n text_comments='#', text_skiprows=0, get_data=False, verbose=False,\n apply_WCS_rv=False):\n multi_order_txt = False\n use_naxis2='all'\n use_naxis3='all'\n \n \n preferred_wlsoln=None # !! need to fix this\n # !! should also be able to input wavelength solution?\n \n if preferred_wlsoln is not None: preferred_wlsoln = wlsolvefxn.get_func_name(preferred_wlsoln)\n \n #### check if file exists ####### #############\n if not os.path.exists(filename): raise IOError(\"File does not exist:'\"+filename+\"'\")\n\n\n #### check if file is text############# \n np_kwargs = {'comments':text_comments,\n 'skiprows':text_skiprows}\n is_text_file, txt_data = check_for_txt_format(filename,**np_kwargs)\n\n #### if it is a text file ######################\n if is_text_file:\n spec_obj = readin_txt(filename,txt_data,get_data) \n return spec_obj \n\n #### now check how it behaves as a fits file\n if non_std_fits: hdulist = pyfits.open(filename)\n else:\n # give standard pyfits readin a try\n try: hdulist = pyfits.open(filename)\n except: raise IOError(\"PYFITS DOES NOT LIKE THE FILE YOU GAVE ('\"+filename+\"'), TO SEE WHAT ERROR IT GIVES TRY: hdulist = pyfits.open('\"+filename+\"')\")\n\n\n #### open up fits file ##############################\n hdulist = pyfits.open(filename)\n\n # select which header unit ot use\n if len(hdulist) > 1: \n hdu = int(hdu)\n hdu = np.clip(hdu,0,len(hdulist)-1)\n else: hdu = 0\n\n # specify the current header unit\n header_unit = hdulist[hdu]\n prihdr = header_unit.header\n\n # can display some useful information \n if verbose: \n print \"=\"*60\n print (hdulist.info(),'\\n')\n if len(hdulist) > 1:\n print \"=\"*20+\" USING HEADER: \"+\"=\"*20\n print repr(hdulist[hdu])\n\n ##### fill in the data class\n # not get header info of relevance\n simple = query_fits_header(prihdr,'SIMPLE',noval=False)\n xtension = query_fits_header(prihdr,'XTENSION')\n if simple.found:\n if not simple.val: print \"HeadsUp: Header Keyword SIMPLE is False, you may encounter unexpected behavior\"\n else:\n if not xtension.found: print \"HeadsUp: No extension keyword found in headers, you may encounter unexpected behavior\"\n \n \n #### read in important information from header, if present\n ibits = query_fits_header(prihdr,'BITPIX') # how many bits per pixel in the data? Not currently necessary, numpy will adapt\n \n naxis = query_fits_header(prihdr,'NAXIS' ,noval=0) # how many dimenstions?\n naxis1 = query_fits_header(prihdr,'NAXIS1',noval=0) # number of points per order\n naxis2 = query_fits_header(prihdr,'NAXIS2',noval=0) # number of orders\n naxis3 = query_fits_header(prihdr,'NAXIS3',noval=0) # number of different spectra\n\n apformat = query_fits_header(prihdr,'APFORMAT')\n if apformat.found: print \"WARNING: I'M NOT SURE HOW TO DEAL WITH APFORMAT VALUES\" # !! 
though I think it's just the spec files\n\n if not naxis.found: raise IOError(\"ERROR: Keyword NAXIS not found\")\n\n bzero = query_fits_header(prihdr,\"BZERO\",noval=0)\n bscale = query_fits_header(prihdr,\"BSCALE\",noval=1)\n\n ###### read in data ##############################################\n data = header_unit.data\n\n if data is None:\n wl, data, inv_var = np.zeros(3).reshape((3,1))\n if get_data: return (wl,data,inv_var)\n else: return eyeSpec_spec(wl,data,inv_var,header_unit.header)\n else:\n # check that data matches up with at least one of the dimensions\n if data.ndim != naxis.val: raise ValueError(\"Dimension of data \"+str(data.ndim)+\" does not match keyword naxis \"+str(naxis.val))\n \n statement = 'Dimension does not match data.shape = '+str(data.shape)+\" fits file (naxis1, naxis2, naxis3) \"+str(tuple([naxis1.val,naxis2.val,naxis3.val]))\n if data.ndim == 1: \n assert data.shape == (naxis1.val,) , statement\n data = data.reshape((1,1,)+data.shape)\n \n elif data.ndim == 2: \n assert data.shape == (naxis2.val, naxis1.val), statement\n data = data.reshape((1,)+data.shape) \n \n elif data.ndim == 3: \n assert data.shape == (naxis3.val, naxis2.val, naxis1.val), statement\n \n ##### Determine the which data is useful \n # which orders to read in \n nband = np.arange(data.shape[0])+1\n nord = np.arange(data.shape[1])+1\n\n \n ##### Calculate the wavelengths for the data\n # set up wavelength and inverse_variance\n wl = np.ones(data.shape)\n \n # get the wavelength coefficients\n wlcoeff = wlsoln_coeff_from_header(header_unit.header, apply_WCS_rv, preferred_wlsoln)\n \n # the same wavelength solution is applied to all bands so just pick the first and broadcast\n band = 0\n priv_info = {}\n \n # go through all the orders\n do_progress = True\n progressive_pt = 1 # this will advance and be used when there is no wavelength solution\n for i in xrange(len(nord)):\n order_i = nord[i]\n\n # get the coefficients and function type \n equ_type = wlcoeff.get_equation_type()\n if equ_type in ['none',None,'no solution'] and do_progress: \n coeff = [progressive_pt,1]\n equ_type = 'pts'\n else: coeff = wlcoeff.get_coeffs(order_i)\n \n # pts[0] = 1 :: this was definitely the right thing to do for SPECTRE's 1-D output but may not be for other equations, may need pts[0]=0, this may be for bzero,bscale\n pts = np.arange(len(wl[0][i]))+1 \n # apply function\n wl[0][i] = wlsolvefxn(pts, coeff, equ_type) \n \n progressive_pt += len(pts)\n \n for j in xrange(len(nband)): \n band_j = nband[j]\n if (band_j,order_i) not in priv_info: priv_info[(band_j,order_i)] = {} \n # record the private information\n priv_info[(band_j,order_i)]['disp']= [coeff, equ_type]\n priv_info[(band_j,order_i)]['rv'] = [0] \n priv_info[(band_j,order_i)]['disp extr'] = deepcopy(wlcoeff.extra)\n \n # now propogate the solution to the other bands\n stdwl = wl[0]\n for i in xrange(1,len(nband)): wl[i] = stdwl \n \n inv_var = np.ones(data.shape)\n #=================================================================#\n # return the data .OR. 
go on and create the spec_obj\n if get_data: return (wl, data, inv_var)\n\n #=================================================================# \n spec_obj = eyeSpec_spec(wl,data,inv_var,header_unit.header)\n # set up private information\n priv_info['filename'] = filename\n spec_obj.filename = filename\n \n bands = np.array(np.arange(1,len(data)+1),dtype=str)\n band_info = {}\n i = -1\n for key in prihdr.keys():\n if key[:6] != 'BANDID': continue\n if i < len(bands):\n i+=1\n bands[i] = prihdr[key]\n band_info[key] = prihdr[key]\n else: raise IOError(\"MORE BANDID KEYWORDS IN HEADER THAN FIRST DIMENSION OF DATA\") \n\n # add band info if available:\n if len(band_info) != 0: priv_info['bandids'] = band_info\n else: priv_info['bandids'] = None\n \n # match up the private info created during read in to the spec_obj\n for key in priv_info: spec_obj._private_info[key] = priv_info[key]\n \n # map fits value => acutal index\n # spec_obj._bands = {}\n # spec_obj._orders = {}\n # for i in range(len(nspec)): spec_obj._bands[nspec[i]] = i\n # for i in range(len(nord)): spec_obj._orders[nord[i]] = i\n # \n \n if 7 in nband: spec_obj.set_band(6) # this is where Magellian data stores it's object data, i.e. BANDID7 which is index 6\n\n if len(hdulist) > 1: spec_obj.hdrlist = [h.header for h in hdulist]\n \n return spec_obj",
"def spectrum_funcs(model):\n\n def mk_dnde(fn):\n def dnde(photon_energies, cme):\n return fn(model, photon_energies, cme)\n\n return dnde\n\n return {\n \"mu mu\": mk_dnde(dnde_mumu),\n \"e e\": mk_dnde(dnde_ee),\n \"pi0 pi pi\": mk_dnde(dnde_pi0pipi),\n \"pi0 pi0 pi0\": mk_dnde(dnde_pi0pi0pi0),\n \"p p\": mk_dnde(dnde_pp),\n }",
"def _readheader(lines):\n hdrdict = {}\n #input list of 26 lines of header\n #station and channel\n line = lines[5]\n parts = line.strip().split()\n fname = parts[1]\n fparts = fname.split('_')\n hdrdict['station'] = fparts[-2]+'_'+fparts[-1]\n\n #the \"Component\" lines look like either: Component S00W, Component S90E, Component Up\n compstr = lines[12].strip().split()[1]\n hdrdict['channel'] = get_comp_name(compstr)\n\n #instrument\n hdrdict['instrument'] = lines[3].split()[1].strip()\n \n #location string\n line = lines[6]\n hdrdict['location'] = line.strip()\n #event origin, buffer start year/month\n line = lines[16]\n parts = line.strip().split()\n bufyear = int(parts[8])\n bufmonth = int(parts[9])\n #epicentral location, buffer start day/hour\n line = lines[17]\n parts = line.strip().split()\n bufday = int(parts[8])\n bufhour = int(parts[9])\n #numpoints, buffer start min/sec\n line = lines[19]\n parts = line.strip().split()\n hdrdict['npts'] = int(parts[0])\n bufmin = int(parts[8])\n millisec = int(parts[9])\n bufsec = int(millisec/1000)\n bufmicrosec = int(np.round(millisec/1000.0 - bufsec))\n hdrdict['starttime'] = UTCDateTime(datetime(bufyear,bufmonth,bufday,bufhour,bufmin,bufsec,bufmicrosec))\n #part C\n #frequency, calibration value and some other stuff we don't care about\n line = lines[20]\n parts = line.strip().split()\n hdrdict['sampling_rate'] = float(parts[0])\n hdrdict['delta'] = 1.0/hdrdict['sampling_rate']\n hdrdict['calib'] = float(parts[7])\n #site location info, this time in dd\n line = lines[21]\n parts = line.strip().split()\n hdrdict['lat'] = float(parts[0]) * -1\n hdrdict['lon'] = float(parts[1])\n hdrdict['height'] = 0.0\n #duration\n line = lines[22]\n parts = line.strip().split()\n hdrdict['duration'] = float(parts[0])\n hdrdict['endtime'] = hdrdict['starttime'] + hdrdict['duration']\n #max acceleration - good for sanity check\n line = lines[23]\n parts = line.strip().split()\n hdrdict['maxacc'] = float(parts[0])\n hdrdict['network'] = 'NZ'\n hdrdict['units'] = 'acc'\n return hdrdict",
"def GetParameters_and_Weight_of_CalSensor(ind, similar_sensors): \n v, a, h = similar_sensors.loc[ind]['Vert_Shift'], similar_sensors.loc[ind]['Amplitude'], similar_sensors.loc[ind]['Horiz_Shift']\n por, res, drain = similar_sensors.loc[ind]['Porosity'], similar_sensors.loc[ind]['Res_SM'], similar_sensors.loc[ind]['Drainage']\n n, w = similar_sensors.loc[ind]['n'], similar_sensors.loc[ind]['Weight']\n return v,a,h,por,res,drain,n,w",
"def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = 
cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array",
"def _build_header_dictionary(self):\n start = 0\n #print self.raw_data\n for a in range(20):\n redatapuller = re.compile(\"\\r\\n\\r\\n\\r\\n(?P<word>.*?)\\t.*?\\n\", re.DOTALL)\n m = redatapuller.search(self.raw_data[start:])\n if not(m):\n break\n self.header_dictionary[m.group(\"word\")] = start + m.end()\n if a==0:\n self.header_dictionary[\"main\"] = start + m.end()\n start += m.end()",
"def qc(filename, legacy=False, querySimbad=False):\n if not os.path.isfile(filename):\n print 'SKIPPING: file', filename, 'does not exist'\n return\n res = {} # the final result\n f = pyfits.open(filename)\n if f[0].header['INSTRUME'].strip() != 'PACMAN':\n print 'SKIPPING:', filename, 'is not a INSTRUME==\\'PACAMA\\' file!'\n return\n # -- propagate keywords:\n keywords = ['DATE-OBS', 'MJD-OBS']\n for k in keywords:\n try:\n res[k] = f[0].header[k]\n except:\n pass\n # -- propagate HIERARCH keywords:\n Hkeywords = ['TPL ID',\n 'INS MODE',\n 'ISS AMBI WINDSP']\n if legacy:\n Hkeywords.extend(['OCS PRESET SS ALPHA',\n 'OCS PRESET SS DELTA',\n 'OCS SS ID',\n 'OCS PRESET PS ALPHA',\n 'OCS PRESET PS DELTA',\n 'OCS PS ID'])\n else:\n Hkeywords.extend(['OCS TARG2 ALPHA',\n 'OCS TARG2 DELTA',\n 'OCS TARG2 NAME',\n 'OCS TARG2 KMAG',\n 'OCS TARG2 HMAG',\n 'OCS TARG1 ALPHA',\n 'OCS TARG1 DELTA',\n 'OCS TARG1 NAME',\n 'OCS TARG1 KMAG',\n 'OCS TARG1 HMAG'])\n for k in Hkeywords:\n try:\n res[k] = f[0].header['HIERARCH ESO '+k]\n except:\n pass\n # -- start-end keywords: need to do an average\n SEkeywords = ['ISS AIRM', 'ISS AMBI FWHM', 'ISS AMBI TAU0']\n for k in SEkeywords:\n try:\n res[k] = strictlyPositiveMean(\n [f[0].header['HIERARCH ESO '+k+' START'],\n f[0].header['HIERARCH ESO '+k+' END']])\n except:\n pass\n # -- FSUs calibrations:\n calibs = ['DARK', 'FLAT', 'PHAS', 'VISI', 'WAVE']\n fsus = ['FSUA', 'FSUB']\n channels = ['W', '1', '2', '3', '4', '5']\n try:\n for fsu in fsus:\n for calib in calibs:\n res[fsu+'_'+calib] = np.zeros((6,4))\n for k, chan in enumerate(channels):\n s = f[0].header['HIERARCH ESO OCS '+fsu+' K'+chan+calib]\n res[fsu+'_'+calib][k,:] = np.float_(s.split(','))\n res['FSUA FLAT-DARK W']=(res['FSUA_FLAT']-res['FSUA_DARK'])[0,:].mean()\n res['FSUB FLAT-DARK W']=(res['FSUB_FLAT']-res['FSUB_DARK'])[0,:].mean()\n except:\n pass\n return res",
"def coefficients(self, force_characters = False) :\n if len(self.__coefficients) == 0 :\n return dict()\n elif not force_characters and len(self.__coefficients) == 1 :\n return self.__coefficients.values()[0] \n else :\n return self.__coefficients",
"def coef_val():\n\n basepath = path.join(path.dirname(path.realpath('__file__')), 'data')\n fdata = basepath + path.sep + 'VAWTPolySurfaceCoef_pub.csv' # published coefficients from paper\n # fdata = basepath + path.sep + 'VAWTPolySurfaceCoef.csv' # polynomial surface fitting coefficients\n\n loc1 = np.zeros(10)\n loc2 = np.zeros(10)\n loc3 = np.zeros(10)\n spr1 = np.zeros(10)\n spr2 = np.zeros(10)\n skw1 = np.zeros(10)\n skw2 = np.zeros(10)\n scl1 = np.zeros(10)\n scl2 = np.zeros(10)\n scl3 = np.zeros(10)\n\n f = open(fdata)\n csv_f = csv.reader(f)\n\n i = 0\n for row in csv_f:\n if i != 0:\n loc1[i-1] = float(row[0])\n loc2[i-1] = float(row[1])\n loc3[i-1] = float(row[2])\n spr1[i-1] = float(row[3])\n spr2[i-1] = float(row[4])\n skw1[i-1] = float(row[5])\n skw2[i-1] = float(row[6])\n scl1[i-1] = float(row[7])\n scl2[i-1] = float(row[8])\n scl3[i-1] = float(row[9])\n i += 1\n\n f.close()\n\n return loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3",
"def getDictCWells(self,itype):\n #Method begins here\n #nx=self.__grid['nx'] #From the geometry in grid\n ny=self.__grid['ny']\n nz=self.__grid['nz']\n minx=self.__grid['ox']\n miny=self.__grid['oy']\n minz=self.__grid['oz']\n rx=self.__grid['dx']\n ry=self.__grid['dy']\n rz=self.__grid['dz']\n \n # well package\n # Remember to use zero-based layer, row, column indices!\n lcoordw=np.zeros((self.__ncwells,3),dtype=np.int32)\n for i in range (self.__ncwells):\n lcoordw[i,0]=floor((self.__dfclst.iloc[i,3]-minx)/rx)\n #In MODFLOW y ans z coordinates are inverted\n lcoordw[i,1]=floor((miny+ry*ny-self.__dfclst.iloc[i,4])/ry)\n lcoordw[i,2]=floor((minz+rz*nz-self.__dfclst.iloc[i,5])/rz)\n \n nper=self.__df.getForcPer()\n ssm_data = {}\n print('Number of conc periods='+str(nper)) \n for i in range(nper):\n lst=[]\n for j in range(self.__ncwells):\n conc_rate=self.__dfcwells.iloc[i+1,j+1]\n lst.append( [ lcoordw[j,2], lcoordw[j,1], lcoordw[j,0], conc_rate, itype['WEL'] ] )\n ssm_data[i]=lst\n print(ssm_data)\n \n print('*--- Succesfull reading of concentration wells ---*')\n \n return ssm_data",
"def nircam_get_polynomial_forward(apName, siaf_aperture_definitions, coldfit_name_mapping, coldfit_source_data, makeplot=False, test=False, verbose=False):\n \n aperture_name_list = siaf_aperture_definitions['AperName'].tolist()\n\n for row, name in enumerate(aperture_name_list):\n if name == apName:\n r = row\n\n # read in hardcoded dictionary\n apSys = coldfit_name_mapping[apName]\n xref = siaf_aperture_definitions['XDetRef'][r]\n yref = siaf_aperture_definitions['YDetRef'][r]\n if xref == '':\n xref = 1024.5 # Allow for COMPOUND types\n else:\n xref = float(xref) # with no pixel information\n if yref == '':\n yref = 1024.5\n else:\n yref = float(yref)\n\n print(apName, xref, yref)\n\n order = 5 # polynomial order\n terms = (order + 1) * (order + 2) // 2 # number of poly coeffs\n A = np.zeros((terms))\n B = np.zeros((terms))\n\n part1 = False\n part2 = False\n\n # read parameters from cold_fit_[] file\n for line in coldfit_source_data:\n column = line.split(',')\n modelname = column[0].strip()\n if modelname==apSys['pixels_to_mm']:\n a0 = float(column[7])\n a1 = float(column[9])\n a2 = float(column[8])\n b0 = float(column[28])\n b1 = float(column[30])\n b2 = float(column[29])\n part1 = True\n if verbose:\n print('a', a0, a1, a2)\n print('b', b0, b1, b2)\n\n # find transformation to OTESKY\n if modelname==apSys['mm_to_degrees']:\n for i in range(terms):\n A[i] = float(column[i + 7])\n B[i] = float(column[i + 28])\n (A1, B1) = polynomial.reorder(A, B)\n part2 = True\n if verbose:\n print(' Before combining')\n print('A1')\n polynomial.print_triangle(A1)\n print('B1')\n polynomial.print_triangle(B1)\n\n if not (part1 and part2):\n print('Incomplete Transform')\n return\n\n # Combine transformations\n delta = a1 * b2 - a2 * b1\n alpha = (b2 * a0 - a2 * b0) / delta\n beta = (-b1 * a0 + a1 * b0) / delta\n AT = polynomial.transform_coefficients(A1, a1, a2, b1, b2)\n BT = polynomial.transform_coefficients(B1, a1, a2, b1, b2)\n ATS = polynomial.shift_coefficients(AT, alpha, beta)\n BTS = polynomial.shift_coefficients(BT, alpha, beta)\n\n # Generate polynomials in terms of V2V3 in arcsec\n AF = 3600 * ATS\n BF = -3600 * BTS\n BF[0] = BF[0] - 468.0\n\n if makeplot:\n # Plot axes in V2V3 coords\n (V20, V30) = (AF[0], BF[0])\n print('Bottom Left Corner', V20, V30)\n V2x = polynomial.poly(AF, 2048.0, 0.0)\n V3x = polynomial.poly(BF, 2048.0, 0.0)\n V2y = polynomial.poly(AF, 0.0, 2048.0)\n V3y = polynomial.poly(BF, 0.0, 2048.0)\n V2opp = polynomial.poly(AF, 2048.0, 2048.0)\n V3opp = polynomial.poly(BF, 2048.0, 2048.0)\n V2c = polynomial.poly(AF, 1024.0, 1024.0)\n V3c = polynomial.poly(BF, 1024.0, 1024.0)\n print('Center', V2c, V3c)\n\n P.figure(1)\n # P.clf()\n P.plot((V2x, V20, V2y), (V3x, V30, V3y))\n P.plot((V2x, V2opp, V2y), (V3x, V3opp, V3y), linestyle='dashed')\n P.plot((V2c), (V3c), 'rx', ms=10.0)\n P.grid(True)\n P.axis('equal')\n P.text(V2x, V3x, 'X')\n P.text(V2y, V3y, 'Y')\n P.text(V2c, V3c, apName)\n P.title('NIRCam')\n P.xlabel('<---V2')\n P.ylabel('V3 --->')\n # V2 to the left\n (l, r) = P.xlim()\n P.xlim(r, l)\n\n # Shift to reference point (xref=1024.5, yref=1024.5 hardcoded for COMPOUND apertures. 
OK?)\n AFS = polynomial.shift_coefficients(AF, xref, yref)\n BFS = polynomial.shift_coefficients(BF, xref, yref)\n\n if test:\n xy = input('x y positions ')\n x = float(xy.split(',')[0])\n y = float(xy.split(',')[1])\n # Two step calculation\n xm = a0 + a1 * x + a2 * y\n ym = b0 + b1 * x + b2 * y\n xan = polynomial.poly(A1, xm, ym, 5)\n yan = polynomial.poly(B1, xm, ym, 5)\n v2 = 3600 * xan\n v3 = -3600 * (yan + 0.13)\n print('\\n Two step forward calculation')\n print(x, y, xm, ym, xan, yan)\n print(v2, v3)\n\n v21 = polynomial.poly(AF, x, y, 5)\n v31 = polynomial.poly(BF, x, y, 5)\n print('One step')\n print(v21, v31)\n\n xr = x - 1024.5\n yr = y - 1024.5\n v2r = polynomial.poly(AFS, xr, yr, 5)\n v3r = polynomial.poly(BFS, xr, yr, 5)\n print('Shifted')\n print(v2r, v3r)\n\n return (AFS, BFS)",
"def get_ctffind_4_1_0_extract_dict() -> typing.Dict[str, str]:\n return {\n 'version': r'.*CTFFind version ([^, ]*).*',\n 'MicrographNameNoDW': r'.*Input file: ([^ ]*).*',\n 'PixelSize': r'.*Pixel size: ([^ ]*).*',\n 'Voltage': r'.*acceleration voltage: ([^ ]*).*',\n 'SphericalAberration': r'.*spherical aberration: ([^ ]*).*',\n 'AmplitudeContrast': r'.*amplitude contrast: ([^ ]*).*',\n }",
"def _detectors1(self, hdr):\n d = {}\n d['FCs'] = self._exit_slits(hdr)\n\n for n in range(1, 6):\n det = 'Detector {}'.format(n)\n d[det] = self._exit_slits(hdr)\n\n d['LD'] = {}\n d['LD']['exit slit width'], d['LD']['exit slit coeff a'], \\\n d['LD']['exit slit coeff b'], d['E0S'], \\\n d['pressure multicollection chamber'], \\\n d['FCs']['fc background setup positive'], \\\n d['FCs']['fc background setup negative'] = \\\n unpack(self._bo + '4d 32s 2i', hdr.read(72))\n\n d['pressure multicollection chamber'] = \\\n self._cleanup_string(d['pressure multicollection chamber'])\n\n for n in range(1, 6):\n det = 'Detector {}'.format(n)\n d[det].update(self._electron_multiplier(hdr))\n\n d['LD'].update(self._electron_multiplier(hdr))\n\n d['EMBig'] = self._exit_slits(hdr)\n d['EMBig'].update(self._electron_multiplier(hdr))\n\n # 8 bytes unused\n hdr.seek(8, 1)\n return d",
"def construct_param_dict(params,K_RC,K_CP,m_P):\n ###scaling constants\n w=params['w']\n pd=params['pd'] # in 3D and 0.21 in 2D\n pv=params['pv']\n Er=params['Er'] ;Ek=params['Ek']\n ER=params['ER'];EC=params['EC'];EP=params['EP'];\n Eq1=params['Eq1'];Eq2=params['Eq2']\n\n\n #capture success function\n a = params['a']\n b = params['b']\n c = params['c']\n formC = params['formC']\n formPC = params['formPC']\n formPR = params['formPR']\n \n ###variables\n TR= params['TR'] ;TC= params['TC'];TP=params['TP'];D_R= params['D_R']; D_C= params['D_C']\n K_RP=K_RC*K_CP\n fmC=params['fmC'];thermyR=params['thermyR']\n thermyC=params['thermyC'];thermyP=params['thermyP']\n fmPR=params['fmPR']\n fmPC=params['fmPC']\n m_C = K_CP*m_P;m_R = K_RP*m_P\n ###normalization constants and boltzmann constant\n r0 = params['r0']\n k0 = params['k0'] # will depend on the productivity of the habitat\n a01 = a02 = params['a012'] # will depedend on the dimension of the habitat \n a03 = params['a03']\n d0= params['d0']\n q10 = params['q10'];q20 = params['q20'];\n v0R = params['v0R'];v0C =params['v0C'];v0P =params['v0P'];k = b_k\n hC0 = params['hC0'];hP0 = params['hP0'] \n \n #intrapopulation parameters\n q1=set_q1(q10,m_C,w,Eq1,TR,k)\n q2=set_q2(q20,m_P,w,Eq2,TC,k)\n K=set_K(k0,m_R,w,Ek,TR,k)\n r=set_r(r0,m_R,w,Er,TR,k)\n\n #interpopulation parameters\n a1=set_alfa(m_C,a01,K_RC,pv,pd,TR,TC,ER,EC,D_R,v0R,v0C,g,alfa,fmC,thermyR,thermyC,k,a,b,c,formC)\n a2=set_alfa(m_P,a02,K_RP,pv,pd,TR,TP,ER,EP,D_R,v0R,v0P,g,alfa,fmPR,thermyR,thermyP,k,a,b,c,formPR)\n a3=set_alfa(m_P,a03,K_CP,pv,pd,TC,TP,EC,EP,D_C,v0C,v0P,g,alfa,fmPC,thermyC,thermyP,k,a,b,c,formPC)\n\n t_hp = set_th(hP0,m_P,w,EP,k,TP)\n t_hc = set_th(hC0,m_C,w,EC,k,TC)\n param_dict={'q1':q1,'q2':q2,'K':K,'r':r,'a1':a1,'a2':a2,'a3':a3,'t_hp':t_hp,'t_hc':t_hc}\n \n return param_dict",
"def _coefficients(regression_df):\n coeff_names = ('mindist', 'x_j', 'f_is', 'v_is')\n coefficients = {x: _get_coefficient(regression_df, x) for x in coeff_names}\n return coefficients",
"def _detectors2(self, hdr):\n # Called AnalysisParam in OpenMIMS, first part only\n # presets separate, last part in _detectors3\n d = {}\n d['Detector 6'] = self._exit_slits(hdr)\n d['Detector 6'].update(self._electron_multiplier(hdr))\n d['Detector 7'] = self._exit_slits(hdr)\n d['Detector 7'].update(self._electron_multiplier(hdr))\n d['exit slit xl'] = unpack(self._bo + '70i', hdr.read(280))\n return d",
"def addDrizKeywords(self, hdr, versions):\n\n # Extract some global information for the keywords\n _geom = 'User parameters'\n\n _imgnum = 0\n for pl in self.parlist:\n\n # Start by building up the keyword prefix based\n # on the image number for the chip\n # _keyprefix = 'D%03d'%_imgnum\n _imgnum += 1\n\n drizdict = DRIZ_KEYWORDS.copy()\n # Update drizdict with current values\n # Any limit on the length of the strings was removed as an update to\n # new versions of the FITS standard and to accommodate MVM processing.\n drizdict['VER']['value'] = pl['driz_version']\n drizdict['DATA']['value'] = pl['data']\n drizdict['DEXP']['value'] = pl['exptime']\n drizdict['OUDA']['value'] = pl['outFinal']\n drizdict['OUWE']['value'] = pl['outWeight']\n if pl['outContext'] is None:\n outcontext = \"\"\n else:\n outcontext = pl['outContext']\n drizdict['OUCO']['value'] = outcontext\n if self.single:\n drizdict['MASK']['value'] = pl['singleDrizMask']\n else:\n drizdict['MASK']['value'] = pl['finalMask']\n\n # Process the values of WT_SCL to be consistent with\n # what IRAF Drizzle would output\n if 'wt_scl_val' in pl:\n _wtscl = pl['wt_scl_val']\n else:\n if pl['wt_scl'] == 'exptime': _wtscl = pl['exptime']\n elif pl['wt_scl'] == 'expsq': _wtscl = pl['exptime'] * pl['exptime']\n else: _wtscl = pl['wt_scl']\n\n drizdict['WTSC']['value'] = _wtscl\n drizdict['KERN']['value'] = pl['kernel']\n drizdict['PIXF']['value'] = pl['pixfrac']\n drizdict['OUUN']['value'] = self.units\n if pl['fillval'] is None:\n _fillval = 'INDEF'\n else:\n _fillval = pl['fillval']\n drizdict['FVAL']['value'] = _fillval\n drizdict['WKEY']['value'] = pl['driz_wcskey']\n\n drizdict['SCAL'] = {'value': pl['scale'], 'comment': 'Drizzle, pixel size (arcsec) of output image'}\n drizdict['ISCL'] = {'value': pl['idcscale'], 'comment': 'Drizzle, default IDCTAB pixel size(arcsec)'}\n\n # Now update header with values\n writeDrizKeywords(hdr, _imgnum, drizdict)\n del drizdict\n\n # Add version information as HISTORY cards to the header\n if versions is not None:\n ver_str = \"AstroDrizzle processing performed using: \"\n hdr.add_history(ver_str)\n for k in versions.keys():\n ver_str = ' ' + str(k) + ' Version ' + str(versions[k])\n hdr.add_history(ver_str)",
"def cpsd(self):\r\n self.welch_method = self.method\r\n self.welch_method['this_method'] = 'welch'\r\n self.welch_method['Fs'] = self.input.sampling_rate\r\n f, spectrum_welch = tsa.get_spectra(self.input.data,\r\n method=self.welch_method)\r\n\r\n return f, spectrum_welch",
"def get_pv_keywords(header):\n cd = np.matrix([[header.get('CD1_1', 0.0), header.get('CD1_2', 0.0)],\n [header.get('CD2_1', 0.0), header.get('CD2_2', 0.0)]], dtype=np.float64)\n pv1 = np.zeros((40,), dtype=np.float64)\n pv2 = np.zeros((40,), dtype=np.float64)\n for k in range(40):\n pv1[k] = header.get('PV1_%d' % k, 0.0)\n pv2[k] = header.get('PV2_%d' % k, 0.0)\n return cd, pv1, pv2"
] | [
"0.6472098",
"0.57686627",
"0.57133174",
"0.55999637",
"0.55356216",
"0.5518358",
"0.5444714",
"0.5396722",
"0.5379888",
"0.5344746",
"0.5342612",
"0.534053",
"0.52858293",
"0.5228",
"0.5175586",
"0.5174878",
"0.51518154",
"0.514766",
"0.5131736",
"0.51092917",
"0.51056576",
"0.5086323",
"0.5061396",
"0.50597197",
"0.5047922",
"0.50315493",
"0.50292325",
"0.50256366",
"0.50238883",
"0.5021276"
] | 0.6787802 | 0 |
Returns the parent of self. If self is root ('/'), parent returns None. You must check the output of parent before using the value. Notice that the parent of root in the shell is '/', so this is a semantic difference between FarmFS and POSIX. | def parent(self):
if self._path == sep:
return None
elif self._parent is None:
self._parent = Path(first(split(self._path)))
return self._parent
else:
return self._parent | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parent(self):\n return self if self.is_root else self.__parent",
"def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None",
"def find_parent(self):\n parent = self._parent\n if parent:\n return parent\n elif not self.is_root:\n psobj = self.get_sobj().GetFather()\n parent = self.__class__(self._std, self._bld, psobj.GetID())\n self._parent = parent\n return parent",
"def parent(self):\n if self._parent is not None:\n return self._parent()\n else:\n return None",
"def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent",
"def get_parent(self):\n return BinaryNode.or_none(self.parent)",
"def parent(self):\n if self.__parent is None:\n return None\n parent = self.__parent()\n if parent is None:\n self.__parent = parent\n return parent",
"def get_parent(self) :\n return self.parent",
"def parent(self):\n if not self._parents:\n return None\n elif len(self._parents) == 1:\n return tuple(self._parents)[0]\n else:\n raise RuntimeError('Ambiguous parent: there are multiple parents.')",
"def return_parent(self):\n # Return parent if completed\n if self.completed:\n return self.father\n return -1",
"def parent_dir(self):\n parent = os.path.dirname(self.dirn)\n if self.is_subdir:\n parent = os.path.basename(parent)\n else:\n if self.platform is not None and parent.endswith(self.platform):\n parent = parent[:-len(self.platform)].rstrip(os.sep)\n if self.year is not None and parent.endswith(str(year)):\n parent = parent[:-len(str(year))].rstrip(os.sep)\n return parent",
"def get_parent(self):\n if not self._parent:\n self._parent = yield self.parent_resource.get(self.parent_id)\n\n raise Return(self._parent)",
"def parent(self):\n result = self.get_parent(\n identifier=DEFAULT_PARENT_IDENTIFIER,\n relationship=CommCareCaseIndexSQL.CHILD\n )\n return result[0] if result else None",
"def get_parent(self):\n return self.parent",
"def get_parent(self):\n return self.parent",
"def get_parent(self):\n return self.parent",
"def parent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parent_id\")",
"def parent_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"parent_id\")",
"def get_parent(self):\n return self._find_by_locator().parent",
"def get_parent(self):\n return self._parent",
"def get_parentURI(self):\n # A container in CDMI has a '/' at the end but we don't (except for the\n # root)\n parent_path = self.collection.container\n if parent_path not in ('/', 'null'):\n parent_path = \"{}\".format(parent_path)\n return \"{}\".format(parent_path)",
"def GetParentFileEntry(self):\n location = getattr(self.path_spec, 'location', None)\n if location is not None:\n parent_location = self._file_system.DirnamePath(location)\n if parent_location == '':\n parent_location = self._file_system.PATH_SEPARATOR\n\n parent_file_reference = None\n mft_attribute = getattr(self.path_spec, 'mft_attribute', None)\n if mft_attribute is not None:\n parent_file_reference = (\n self._fsntfs_file_entry.get_parent_file_reference_by_attribute_index(\n mft_attribute))\n else:\n parent_file_reference = (\n self._fsntfs_file_entry.get_parent_file_reference())\n\n if parent_file_reference is None:\n return None\n\n parent_mft_entry = (\n parent_file_reference & self._FILE_REFERENCE_MFT_ENTRY_BITMASK)\n\n parent_path_spec = getattr(self.path_spec, 'parent', None)\n # TODO: determine and pass the mft_attribute of the parent\n # for a faster resolve of the file entry.\n path_spec = ntfs_path_spec.NTFSPathSpec(\n location=parent_location, mft_entry=parent_mft_entry,\n parent=parent_path_spec)\n\n # TODO: handle parent correctly use attribute index?\n is_root = bool(\n parent_location == self._file_system.LOCATION_ROOT or\n parent_mft_entry == self._file_system.MFT_ENTRY_ROOT_DIRECTORY)\n\n return NTFSFileEntry(\n self._resolver_context, self._file_system, path_spec, is_root=is_root)",
"def get_parent_id(self):\n return self._parent_id",
"def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret",
"def get_parent(self) -> Optional[\"BaseSegment\"]:\n if not self._parent:\n return None\n _parent = self._parent()\n if not _parent or self not in _parent.segments:\n return None\n return _parent",
"def get_parent(self):\n return self.__parent",
"def get_parent(self):\n return self.__parent",
"def parent(self):\r\n if not self._meta.parent:\r\n return None\r\n\r\n if not self.__parent__:\r\n self.__parent__ = self._meta.parent()\r\n\r\n return self.__parent__",
"def parent(self, node):\n self._validate_node(node)\n idx = node._index\n if idx == 0:\n return None # Root node has no parent\n if idx % 2 == 0:\n return self._array[(idx-2)//2] # Right child (even number)\n return self._array[(idx-1)//2] # left child (odd number)",
"def parent(self):\n return getattr(self, \"parent_%s\" % self.discriminator)"
] | [
"0.78836995",
"0.77781475",
"0.76843834",
"0.7628968",
"0.75214714",
"0.75041956",
"0.74604297",
"0.7354661",
"0.734197",
"0.72440976",
"0.7241017",
"0.7235812",
"0.72275674",
"0.72217995",
"0.72217995",
"0.72217995",
"0.7179848",
"0.7179848",
"0.71753633",
"0.71729606",
"0.7147578",
"0.7133115",
"0.713001",
"0.71210414",
"0.71207786",
"0.7110767",
"0.7110767",
"0.7105586",
"0.7098647",
"0.7073136"
] | 0.81464714 | 0 |
Creates a hard link to dst.

            dst
        DNE Dir F   SLF SLD SLB
s  DNR  R   R   N   N   R   R
e  Dir  R   R   R   R   R   R
l  F    R   R   R   R   ?   ?
f  SL   R   R   R   R   ?   ?

R means raises. N means new hardlink created. | def link(self, dst):
assert isinstance(dst, Path)
link(dst._path, self._path) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lnh(src, dst):\n os.link(src, dst)",
"def link(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n abs_src = self._rootjoin(src)\n abs_dst = os.path.join(self.chroot, dst)\n try:\n os.link(abs_src, abs_dst)\n except OSError as e:\n if e.errno == errno.EEXIST:\n # File already exists, skip\n pass\n elif e.errno == errno.EXDEV:\n # Hard link across devices, fall back on copying\n shutil.copyfile(abs_src, abs_dst)\n else:\n raise",
"def link(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n abs_src = self._rootjoin(src)\r\n abs_dst = os.path.join(self.chroot, dst)\r\n try:\r\n os.link(abs_src, abs_dst)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n # File already exists, skip\r\n pass\r\n elif e.errno == errno.EXDEV:\r\n # Hard link across devices, fall back on copying\r\n shutil.copyfile(abs_src, abs_dst)\r\n else:\r\n raise",
"def link(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n abs_src = self._rootjoin(src)\r\n abs_dst = os.path.join(self.chroot, dst)\r\n try:\r\n os.link(abs_src, abs_dst)\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n # File already exists, skip\r\n pass\r\n elif e.errno == errno.EXDEV:\r\n # Hard link across devices, fall back on copying\r\n shutil.copyfile(abs_src, abs_dst)\r\n else:\r\n raise",
"def _tryLink(self,src, dst):\n\n hiero.core.log.info(\"Attempting to link %s to %s\" % (src, dst))\n \n try:\n os.link(util.asUnicode(src), util.asUnicode(dst))\n except OSError as err:\n # If the OS returns an ENOTSUP error (45), for example when trying to set\n # flags on an NFS mounted volume that doesn't support them, Python should\n # absorb this. However, a regression in Python 2.7.3 causes this not to\n # be the case, and the error is thrown as an exception. We therefore\n # catch this explicitly as value 45, since errno.ENOTSUP is not defined\n # in Python 2.7.2 (which is part of the problem). See the following\n # link for further information: http://bugs.python.org/issue14662\n # See TP 199072.\n if err.errno == 45: # ENOTSUP\n pass\n elif err.errno == 17: # FILE EXISTS\n raise\n else:\n raise",
"def add_link (self, src, dst):\n raise NotImplementedError",
"def add_sglink (self, src, dst):\n raise NotImplementedError",
"def create_symlink(src: str, dst: str) -> bool:\n if exists(src):\n with suppress(Exception):\n if isfile(dst):\n remove(dst)\n else:\n rmtree(dst)\n\n try:\n\n symlink(src, dst)\n return True\n\n except PermissionError as err:\n printer(\n \"User without permission to create the symbolic link.\",\n str(err),\n foreground=FG().ERROR,\n )\n return False\n\n except FileExistsError:\n remove(dst)\n symlink(src, dst)\n return False",
"def force_link(src, dst):\n try:\n os.unlink(dst)\n os.link(src, dst)\n except OSError:\n os.link(src, dst)",
"def add_link(self, src_node_id, dst_node_id, src_port, dst_port, capacity=10000000):\n\n # Get function name\n fname = sys._getframe().f_code.co_name\n \n # Do not add the link if the nodes are not in the topology\n if src_node_id not in self.nodes or dst_node_id not in self.nodes:\n #print(\"{}: Nodes {} and/or {} not found - can't add link\".\n # format(fname, src_node_id, dst_node_id),\n # file=sys.stderr)\n return\n\n # Do not add the link if the link already exists\n for i in range(0, len(self.neighbors[src_node_id])):\n neighbor_id = self.neighbors[src_node_id][i][\"dst_node_id\"]\n if (neighbor_id == dst_node_id and\n self.neighbors[src_node_id][i][\"src_port\"] == src_port and\n self.neighbors[src_node_id][i][\"dst_port\"] == dst_port):\n # print(\"{}: link already exists - exiting\".format(fname))\n return\n\n # Do not add the link if the link already exists\n for i in range(0, len(self.neighbors[dst_node_id])):\n neighbor_id = self.neighbors[dst_node_id][i][\"dst_node_id\"]\n if (neighbor_id == src_node_id and\n self.neighbors[dst_node_id][i][\"src_port\"] == dst_port and\n self.neighbors[dst_node_id][i][\"dst_port\"] == src_port):\n # print(\"{}: link already exists - exiting\".format(fname))\n return\n\n # Derive src/dst interfaces\n # Want to make a version of this function in the Topology class. Not\n # good architecture\n src_int = self.mgr.get_interface(src_node_id, src_port)\n dst_int = self.mgr.get_interface(dst_node_id, dst_port)\n \n # Destination entry in the topology\n src_entry = {\n \"src_node_id\": src_node_id,\n \"dst_node_id\": dst_node_id,\n \"src_port\": src_port,\n \"dst_port\": dst_port,\n \"src_int\": src_int,\n \"dst_int\": dst_int,\n \"bps_reserved\": 0,\n \"bps_current\": 0,\n \"bps_capacity\": capacity,\n \"cur_bytes_sent\": 0,\n \"cur_bytes_recvd\": 0,\n \"prev_bytes_sent\": 0,\n \"prev_bytes_recvd\": 0,\n \"utilization_pct\": 0.0\n }\n self.neighbors[src_node_id].append(src_entry)\n\n # Destination entry in the topology\n dst_entry = {\n \"src_node_id\": dst_node_id,\n \"dst_node_id\": src_node_id,\n \"src_port\": dst_port,\n \"dst_port\": src_port,\n \"src_int\": dst_int,\n \"dst_int\": src_int,\n \"bps_reserved\": 0,\n \"bps_current\": 0,\n \"bps_capacity\": capacity,\n \"cur_bytes_sent\": 0,\n \"cur_bytes_recvd\": 0,\n \"prev_bytes_sent\": 0,\n \"prev_bytes_recvd\": 0,\n \"utilization_pct\": 0.0\n }\n self.neighbors[dst_node_id].append(dst_entry)\n\n # Add the link to self.links if it does not already exist\n # if ((src_port, dst_port) not in self.links and\n # (dst_port, src_port) not in self.links):\n # self.links[(src_port, dst_port)] = src_index\n # self.links[(dst_port, dst_port)] = dst_index\n \n self.l += 1",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst",
"def addLinkNamedIfce(self, src, dst, *args, **kwargs):\n self.addLink(src, dst,\n intfName1=\"-\".join((src, dst)),\n intfName2=\"-\".join((dst, src)),\n * args, **kwargs\n )",
"def _link(filename, existing_filename):\n CreateHardLinkW(filename, existing_filename, 0)",
"def add_edge (self, src, dst, link):\n if isinstance(src, Node):\n src = src.id\n elif isinstance(src, Port):\n src = src.node.id\n if isinstance(dst, Node):\n dst = dst.id\n elif isinstance(dst, Port):\n dst = dst.node.id\n self.network.add_edge(src, dst, key=link.id)\n self.network[src][dst][link.id] = link",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst",
"def add_link(self, src, dst, src_port, dst_port, weight = 1):\n\t\tif src not in self.switches_adj:\n\t\t\tself.switches_adj[src] = []\n\t\tself.switches_adj[src].append(dst)\t\n\n\n\t\t#add link and it's attributes\n\t\tif src not in self.links:\n\t\t\tself.links[src] = {}\n\t\tself.links[src][dst] = {}\n\t\tself.links[src][dst]['src_port'] = src_port\n\t\tself.links[src][dst]['dst_port'] = dst_port\n\t\tself.links[src][dst]['weight'] = weight",
"def ln(src, dst):\n os.symlink(src, dst)",
"def symlink_p(src, dst):\n try:\n os.symlink(src, dst)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.islink(dst):\n if os.path.realpath(dst) == os.path.realpath(src):\n pass\n else:\n print('%s is a link already pointing to %s' % (dst, os.path.realpath(dst)), file=sys.stderr)\n else:\n raise",
"def copy_deep(src: str, dst: str, create_dst_dir: bool = False) -> None:\n system_is_darwin = platform.system().lower() == \"darwin\"\n if create_dst_dir:\n mkdir_p(os.path.dirname(dst))\n src_is_link = os.path.islink(src)\n dst_exists = os.path.lexists(dst)\n if os.path.isdir(src) and not src_is_link:\n logging.debug(\"Copying directory {} to {}\".format(src, dst))\n mkdir_p(dst)\n for name in os.listdir(src):\n copy_deep(os.path.join(src, name), os.path.join(dst, name))\n elif src_is_link:\n if dst_exists:\n return\n target = os.readlink(src)\n logging.debug(\"Creating symlink {} -> {}\".format(dst, target))\n os.symlink(target, dst)\n else:\n if dst_exists:\n if not system_is_darwin:\n return\n # Only overwrite the file if the source is newer than the destination.\n if os.path.getmtime(src) <= os.path.getmtime(dst):\n return\n logging.debug(\"Copying file {} to {}\".format(src, dst))\n # Preserve the file attributes.\n shutil.copy2(src, dst)",
"def create_symlink(src, dest):\n sudo('ln -s {} {}'.format(src, dest))",
"def make_symlink(dst, src, silently_move=False):\n dst_dir = os.path.dirname(dst.rstrip(os.path.sep))\n if not os.path.isdir(dst_dir):\n os.makedirs(dst_dir)\n\n # get a temporary directory\n if os.path.exists(dst):\n if silently_move or (((os.path.isfile(dst) or (os.path.isdir(dst)) and\n query_yes_no('Move NSLS-II from userpackages?')))):\n import tempfile\n temp_dir = tempfile.mkdtemp()\n shutil.move(dst, temp_dir)\n print('Previous NSLS-II folder moved to {0}'.format(temp_dir))\n else:\n print('NSLS-II already exists in userpackages. Please move or delete it'\n 'and then re-run setup.py')\n return False\n\n # this symlink does not get removed when pip uninstall vttools is run...\n # todo figure out how to make pip uninstall remove this symlink\n try:\n # symlink the NSLS-II folder into userpackages\n os.symlink(src, dst)\n except AttributeError:\n # you must be on Windows!\n call(['mklink', '/j', dst, src], shell=True)\n\n return True",
"def createLink(self, source, destination):\n log(\"creating link\")\n\n if \"flix\" in source:\n return \"%s\" % +OSUtils.createLink(source, destination)\n return \"0\"",
"def add_sglink (self, src_port, dst_port, hop=None, id=None, flowclass=None,\n tag_info=None, delay=None, bandwidth=None):\n if hop is None:\n hop = EdgeSGLink(src=src_port, dst=dst_port, id=id, flowclass=flowclass,\n tag_info=tag_info, bandwidth=bandwidth, delay=delay)\n self.add_edge(src_port.node, dst_port.node, hop)\n return hop",
"def _symlink_or_copy(src, dst):\n # try to symlink file\n try:\n os.symlink(src, dst)\n print('Creating symlink \"%s\" pointing to \"%s\"' % (dst, src))\n except Exception as ex_symlink:\n # try to copy file\n try:\n shutil.copyfile(src, dst)\n print('Copying file from \"%s\" to \"%s\"' % (src, dst))\n except Exception as ex_copy:\n raise RuntimeError('Could neither symlink nor copy file \"%s\" to \"%s\":\\n- %s\\n- %s' % (src, dst, str(ex_symlink), str(ex_copy)))",
"def create_symlink_dir(src_dir, src_list, dst):\n if not src_list:\n return\n message = \"creating symlink directory at {dst} with files {src_list}\".format(\n dst=dst,\n src_list=pformat(src_list))\n logging.info(message)\n if not os.path.exists(dst):\n os.makedirs(dst)\n for src_file in src_list:\n if not src_file:\n continue\n source = os.path.join(src_dir, src_file)\n destination = os.path.join(dst, src_file)\n if os.path.lexists(destination):\n continue\n try:\n os.symlink(source, destination)\n except Exception as e:\n msg = format_debug(e)\n logging.error(e)",
"def copy_to_se(self, src, dst, create_parent_directory=True):\n mgm, dst = self._safe_split_mgm(dst)\n dst = self._join_mgm_lfn(mgm, dst)\n if create_parent_directory:\n parent_directory = osp.dirname(dst)\n self.create_directory(parent_directory)\n logger.warning('Copying {0} to {1}'.format(src, dst))\n cmd = [ 'xrdcp', '-s', src, dst ]\n svj.core.utils.run_command(cmd)",
"def add_link (self, src_port, dst_port, link=None, id=None, dynamic=False,\n backward=False, delay=None, bandwidth=None):\n if link is None:\n type = Link.DYNAMIC if dynamic else Link.STATIC\n link = EdgeLink(src=src_port, dst=dst_port, type=type, id=id,\n backward=backward, delay=delay, bandwidth=bandwidth)\n else:\n link.src, link.dst = src_port, dst_port\n self.add_edge(src_port.node, dst_port.node, link)\n return link",
"def addPort( self, src, dst, sport=None, dport=None ):\n # Initialize if necessary\n ports = self.ports\n ports.setdefault( src, {} )\n ports.setdefault( dst, {} )\n # New port: number of outlinks + base\n if sport is None:\n src_base = 1 if self.isSwitch( src ) else 0\n sport = len( ports[ src ] ) + src_base\n if dport is None:\n dst_base = 1 if self.isSwitch( dst ) else 0\n dport = len( ports[ dst ] ) + dst_base\n ports[ src ][ sport ] = ( dst, dport )\n ports[ dst ][ dport ] = ( src, sport )\n return sport, dport",
"def is_broken_link(self):\n if not os.path.exists(self.dst):\n if os.path.lexists(self.dst):\n return True\n return False",
"def symlink(self, src, dst):\n return os.symlink(src, dst)"
] | [
"0.6689718",
"0.652778",
"0.6506502",
"0.6506502",
"0.62713677",
"0.6266134",
"0.6220446",
"0.6215565",
"0.59210443",
"0.5917583",
"0.58603233",
"0.5844886",
"0.57999426",
"0.5775267",
"0.5713437",
"0.5691181",
"0.56734186",
"0.566668",
"0.5611633",
"0.55994177",
"0.5585695",
"0.55593204",
"0.553026",
"0.55287504",
"0.5497215",
"0.54709816",
"0.54601485",
"0.5449163",
"0.5444268",
"0.5415472"
] | 0.66363525 | 1 |
Reads src_fd and puts the contents into a file located at self._path. | def copy_fd(self, src_fd, tmpdir=None):
if tmpdir is None:
tmpfn = sameDir
else:
tmpfn = lambda _: tmpdir._path
mode = 'w'
if 'b' in src_fd.mode:
mode += 'b'
with safeopen(self._path, mode, useDir=tmpfn) as dst_fd:
copyfileobj(src_fd, dst_fd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_file(self, src: PathLike, dest: PathLike, force: bool = False):",
"def handle_file(self, source_path, dest_path):\n raise NotImplemented",
"def copyfileobj(fsrc, fdst, length=0):\r\n # Localize variable access to minimize overhead.\r\n if not length:\r\n length = COPY_BUFSIZE\r\n fsrc_read = fsrc.read\r\n fdst_write = fdst.write\r\n total_buf = 0\r\n while True: \r\n buf = fsrc_read(length)\r\n if not buf:\r\n break\r\n fdst_write(buf)\r\n total_buf += len(buf)\r\n ee.emit('onfilecopy', total_buf, size)",
"def send_data(self, fp, dest: PathLike, force: bool = False):",
"def read(self, src):\n self.read_mesh(src)\n self.read_data(src)",
"def _targetFile(self):\n basename = os.path.basename(self.src)\n filename = os.path.join(self.target_dir, basename)\n return open(filename, 'w')",
"def getFile(self, _src, _dst):\n\n #--------------------\n # Reset total size of downloads for all files\n #-------------------------\n self.downloadTracker['totalDownloadSize']['bytes'] = 0\n self.downloadTracker['downloadedSize']['bytes'] = 0\n downloadFolders = []\n\n #-------------------------\n # Remove existing dst files from their local URI\n #-------------------------\n if os.path.exists(_dst):\n os.remove(_dst)\n self.__getFile_requests(_src, _dst)",
"def read_handle(self, handle, filename, events, error):\n self.filehandle.seek(self.end_of_file)\n tailportion = self.filehandle.read()\n sys.stdout.write(tailportion)\n self.end_of_file = os.stat(self.filename).st_size",
"def _source_path_reader(self, src, encoding=\"utf-8\"):\n if src is None:\n return src\n if isinstance(src, dict) and \"content\" in src:\n with tempfile.NamedTemporaryFile(mode=\"w\", encoding=encoding, delete=False) as fp:\n fp.write(src[\"content\"])\n return fp.name\n elif isinstance(src, dict) and \"file\" in src:\n if os.path.exists(src[\"file\"]) is False:\n raise FileNotFound(src)\n return src[\"file\"]\n else:\n raise InvalidParameter(\"The parameter is invalid.\")",
"def copy_file(self, dst, tmpdir=None):\n if tmpdir is None:\n tmpfn = sameDir\n else:\n tmpfn = lambda _: tmpdir._path\n assert isinstance(dst, Path)\n with open(self._path, 'rb') as src_fd:\n with safeopen(dst._path, 'wb', useDir=tmpfn) as dst_fd:\n copyfileobj(src_fd, dst_fd)",
"def run_cmd_file(self, src_file=None):\r\n if src_file is not None:\r\n self.cmd_stream.set_ctl_val(\"src_file_name\", src_file)\r\n \r\n self.cmd_stream.reset(src_file=src_file)\r\n self.cmd_stream.run_cmd_file(src_file=src_file)\r\n ###self.cmd_stream.set_eof(False)\r",
"def download_file(\n src: Union[str, \"FluidPath\"], dest: Path, *, force: bool = False\n) -> None:\n import smart_open\n\n if dest.exists() and not force:\n return None\n src = str(src)\n with smart_open.open(src, mode=\"rb\", compression=\"disable\") as input_file:\n with dest.open(mode=\"wb\") as output_file:\n shutil.copyfileobj(input_file, output_file)",
"def open(self):\n self.f = open(self.join(self.fname), 'rb')",
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def from_file(cls, path_src):\n cp_cond = [os.path.exists(path_src), os.path.isfile(path_src),\n len(path_new) != 0]\n content = \"\"\n\n # read input from file\n if cp_cond[0] and cp_cond[1]:\n with open(path_src) as f:\n content = f.read()\n\n # connect object with file content\n return cls(path_src, inp_string=content, to_file=False)",
"def fs_cat(self, src: str, chunk_size: int = 256) -> None:\n cmd = (\n \"with open('%s') as f:\\n while 1:\\n\"\n \" b=f.read(%u)\\n if not b:break\\n print(b,end='')\" % (src, chunk_size)\n )\n self.exec_(cmd, data_consumer=stdout_write_bytes)",
"def sdp_out_file(self):\n return op.join(self.out_dir, \"read_to_rc_read.sdp\")",
"def open(self):\n self.file = open(self.filename, \"rb\", buffering=self.bufferSize)",
"def _open_fd_r(self):\n self.fd = os.open(self.proxy, os.O_RDONLY)",
"def do_readsourcefile(self, file_name):\r\n self.file_name = file_name\r\n f = open(self.file_name, \"r\")\r\n print(f.read())\r\n f.close()",
"def _copy_file ( self, source, dest ):\n return",
"def _put(self, src_fname, dst_fname):\n logging.info('Transferring file %s to %s', src_fname, self._ip_addr)\n sftp_cli = self._get_sftp_client()\n if sftp_cli is None:\n raise Exception('Not supported without ssh.')\n return sftp_cli.put(src_fname, dst_fname)",
"def file_copy(\n self,\n src: str,\n dest: Optional[str] = None,\n file_system: Optional[str] = None,\n peer: Optional[bool] = False,\n ) -> None:\n if dest is None:\n dest = os.path.basename(src)\n\n if file_system is None:\n file_system = self._get_file_system()\n\n # netmiko's enable_scp\n self.enable_scp()\n self._file_copy(src, dest, file_system)\n if peer:\n self.peer_device._file_copy(src, dest, file_system) # pylint: disable=protected-access\n\n # logging removed because it messes up unit test mock_basename.assert_not_called()\n # for tests test_file_copy_no_peer_pass_args, test_file_copy_include_peer\n # log.info(\"Host %s: File %s transferred successfully.\")",
"def __getFile_httplib(self, _src, _dst):\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n self.runEventCallbacks('downloadStarted', _src, -1)\n self.runEventCallbacks('downloading', _src, 0)\n\n\n\n #-------------------- \n # Download\n #-------------------- \n response = self.__httpsRequest('GET', _src)\n data = response.read() \n with open(_dst, 'wb') as f:\n f.write(data) \n\n\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)",
"def _copyfileobj(fsrc, fdst, length=0, chunks=0.):\n # Localize variable access to minimize overhead.\n if not length:\n length = 64 * 1024\n fsrc_read = fsrc.read\n fdst_write = fdst.write\n if chunks:\n pbar = tqdm.tqdm(total=int(chunks),\n desc='Downloading file chunks (estimated)',\n unit='chunk',\n dynamic_ncols=True)\n while True:\n buf = fsrc_read(length)\n if not buf:\n break\n fdst_write(buf)\n if chunks:\n pbar.update()",
"def load_file(self, src: str) -> bytes:\n if re.match(\"https?://\", src):\n content = self.load_file_from_url(src)\n else:\n content = self.load_file_from_folders(src)\n return content",
"def send_file_contents(self):\n self.send_comm.send_nolimit(self.ply_dict)\n self.send_comm.send_eof()",
"def write(self, fd):\n if self.__tag:\n fd.write(self.__tag)\n for ii in self.__content:\n fd.write(ii)",
"def copy_file(src, dest):\n with open_local_or_gcs(src, 'r') as h_src:\n with open_local_or_gcs(dest, 'w') as h_dest:\n shutil.copyfileobj(h_src, h_dest)",
"def put_file(self, src_fname, dst_fname):\n dst_fname = os.path.normpath(dst_fname)\n self.mkdirs(os.path.dirname(dst_fname))\n self._put(src_fname, dst_fname)"
] | [
"0.5596338",
"0.5459085",
"0.5378296",
"0.52936655",
"0.52718097",
"0.520541",
"0.5150503",
"0.5127887",
"0.5090162",
"0.5075606",
"0.5064022",
"0.50478476",
"0.500685",
"0.5000948",
"0.49774417",
"0.4976913",
"0.49672064",
"0.4959817",
"0.4932902",
"0.49120706",
"0.49093175",
"0.48966137",
"0.4896436",
"0.48428193",
"0.4836483",
"0.48361883",
"0.483371",
"0.4829034",
"0.47951993",
"0.47658953"
] | 0.66701883 | 0 |
Copy self to path dst. Does not attempt to ensure dst is a valid destination. Raises IsADirectoryError and FileDoesNotExist on namespace errors. The file will either be fully copied, or will not be created. This is achieved via temp files and atomic swap. This API works for large files, as data is read in chunks and sent to the destination. | def copy_file(self, dst, tmpdir=None):
if tmpdir is None:
tmpfn = sameDir
else:
tmpfn = lambda _: tmpdir._path
assert isinstance(dst, Path)
with open(self._path, 'rb') as src_fd:
with safeopen(dst._path, 'wb', useDir=tmpfn) as dst_fd:
copyfileobj(src_fd, dst_fd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n source = os.path.abspath(self.path)\n destination = os.path.abspath(self.target)\n\n logger.info(\"Running Copy Method - SOURCE=\\\"{src}\\\" DESTINATION=\\\"{dst}\\\" IGNORE=\\\"{ignore}\\\"\".format(src=source, dst=destination, ignore=self.ignore))\n\n if not os.path.exists(source):\n logger.error(\"\\\"{source}\\\" PATH DOESN'T EXIST. PROGRAM TERMINATED. Please check log file.\".format(source=source))\n\n if self.rules is not None:\n files = self.rules\n else:\n self.create_packet_structure(source)\n files = self.files\n\n for (k,v) in files.items():\n src = os.path.join(source,k)\n dst = os.path.join(destination,v)\n dirpath = os.path.dirname(dst)\n if not os.path.isdir(dirpath):\n logger.info(\"Create directory - \\\"{dst}\\\"\".format(dst=dirpath))\n os.makedirs(dirpath)\n logger.info(\"copy from \\\"{f}\\\" to \\\"{t}\\\"\".format(f=src,t=dst))\n shutil.copyfile(src,dst)\n logger.info(\"OK\")",
"def _copy(self):\n if os.path.isfile(self.source):\n shutil.copy(self.source, self.path)\n elif os.path.isdir(self.source):\n shutil.copytree(self.source, self.path)\n else:\n raise ValueError(\"Local path does not exist\")\n\n self._set_chmod()\n return self.check(force=False)",
"def _copyFile(self, source, dstDir):\n dstFile = os.path.join(dstDir, os.path.basename(source))\n touch = \"/usr/bin/touch\" if OSUtilities.isMacOS() else \"/bin/touch\"\n subprocess.call([touch, dstFile])\n subprocess.call([\"/bin/cp\", source, dstDir])\n self._logger.info(\"Copying file \" + source + \" to \" + dstDir)\n self._numCopiedFiles += 1",
"def copy(self):\n\n if self.path_source is not None:\n full_source_path = os.path.join(\n os.path.expandvars(self.path_source), self.name\n )\n\n if self.sudo:\n spawn.process(\n f'cp -v -- \"{full_source_path}\" \"{self.path_destination}\"',\n sudo=True,\n )\n else:\n message.info(\n f\"Copied: '{full_source_path}' --> '{self.path_destination}'\"\n )\n shutil.copy(full_source_path, self.path_destination)\n else:\n message.error(f\"'{self.name}' has no source from which to copy from.\")",
"def copy(self, src, dst, label=None):\n self._tag(dst, label)\n self._mkdir_for(dst)\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copyto(self):\n\n if self.do_nothing_bl is False:\n if os.path.exists(self.transcended_fileP_str) is True and self.overwrite_bl is False:\n log_str = 'The file / direction \"{:s}\" already exists and the will not be overwritten'\n log_obj.debug(log_str.format(self.transcended_fileP_str))\n\n return None\n\n os.makedirs(os.path.dirname(self.transcended_fileP_str), exist_ok = True)\n\n if os.path.exists(self.local_fileP_str) is True:\n if os.path.isdir(self.local_fileP_str) is True:\n if os.path.exists(self.transcended_fileP_str) is True and self.overwrite_bl is True:\n log_obj.debug('Removing existing directory \"{:s}\"'.format(self.transcended_fileP_str))\n shutil.rmtree(self.transcended_fileP_str)\n\n log_obj.debug('Copying to directory \"{:s}\" from \"{:s}\"'.format(self.local_fileP_str,\n self.transcended_fileP_str))\n shutil.copytree(self.local_fileP_str, self.transcended_fileP_str)\n else:\n if os.path.exists(self.transcended_fileP_str) is True and self.overwrite_bl is True:\n log_obj.debug('Removing existing file \"{:s}\"'.format(self.transcended_fileP_str))\n os.remove(self.transcended_fileP_str)\n\n log_obj.debug('Copying to file \"{:s}\" from \"{:s}\"'.format(self.local_fileP_str,\n self.transcended_fileP_str))\n shutil.copy(self.local_fileP_str, self.transcended_fileP_str)\n else:\n log_obj.error('The file \"{:s}\" does not exists'.format(self.local_fileP_str))",
"def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copy(self, src, dst, label=None):\r\n self._tag(dst, label)\r\n self._mkdir_for(dst)\r\n shutil.copyfile(self._rootjoin(src), os.path.join(self.chroot, dst))",
"def copyAsset(self, src, dst, **kw):\n if self.isfile(src):\n self.copyfile(src, dst)\n else:\n # copy folder\n if not self.exists(dst):\n self.makedirs(dst)\n for name in self.listdir(src):\n self.copyAsset(self.joinpath(src, name), self.joinpath(dst, name), copycache=0)\n\n # copy cache\n cache_src = self.cache_path(src)\n if not os.path.exists(cache_src):\n return\n\n cache_dst = self.cache_path(dst)\n cache_dst_parent = os.path.dirname(cache_dst)\n if not os.path.exists( cache_dst_parent ):\n os.makedirs(cache_dst_parent )\n if not os.path.exists(cache_dst):\n ucopytree(cache_src, cache_dst)",
"def copy_file(filename, dst):\n # Create dir if needed\n dir_path = os.path.dirname(os.path.expanduser(dst))\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n src = os.path.join(get_data(''), filename)\n dst = os.path.expanduser(dir_path)\n shutil.copy2(src, dst)",
"def copyfrom(self):\n\n if self.do_nothing_bl is False:\n if os.path.exists(self.transcended_fileP_str) is True:\n os.makedirs(os.path.dirname(self.local_fileP_str), exist_ok = True)\n\n if os.path.isdir(self.transcended_fileP_str) is True:\n log_obj.debug('Copying from directory \"{:s}\" to \"{:s}\"'.format(self.transcended_fileP_str,\n self.local_fileP_str))\n shutil.copytree(self.transcended_fileP_str, self.local_fileP_str)\n else:\n log_obj.debug('Copying from file \"{:s}\" to \"{:s}\"'.format(self.transcended_fileP_str,\n self.local_fileP_str))\n shutil.copy(self.transcended_fileP_str, self.local_fileP_str)\n else:\n log_obj.error('The file \"{:s}\" does not exists'.format(self.transcended_fileP_str))",
"def copyfile(self, destination, **kwargs):\n assert _os.path.isfile(self.__str__()) == True\n _shutil.copyfile(self.__str__(), destination, **kwargs)",
"def copy_one(self, src, dest):\n if self.manager.no_sourcemaps and self.is_ignored_sourcemap(src.name):\n return\n\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n copytree_kwargs = {}\n\n if self.manager.no_sourcemaps:\n copytree_kwargs[\"ignore\"] = SOURCEMAP_IGNORE_PATTERNS\n\n if src.is_dir():\n shutil.copytree(src, dest, **copytree_kwargs)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)",
"def copy_one(self, src, dest):\n if dest.is_dir():\n shutil.rmtree(dest)\n elif dest.exists():\n dest.unlink()\n\n if not dest.parent.exists():\n self.log.debug(f\"creating folder {dest.parent}\")\n dest.parent.mkdir(parents=True)\n\n self.maybe_timestamp(dest.parent)\n\n if src.is_dir():\n shutil.copytree(src, dest)\n else:\n shutil.copy2(src, dest)\n\n self.maybe_timestamp(dest)",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n rstat = self.exists(dst, stat=True)\n\n if rstat:\n if self.file_interface.isdir(dst, stat=rstat) and src:\n full_dst = os.path.join(dst, os.path.basename(src))\n else:\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True, **kwargs)\n full_dst = dst\n\n return full_dst",
"def _copy_file ( self, source, dest ):\n return",
"def file_copy_form_bcdbfs(self, path, dest):\n source_file = self._file_model.get_by_name(name=path)[0]\n if self.is_dir(dest):\n dest = j.sal.fs.joinPaths(dest, j.sal.fs.getBaseName(path))\n dest_file = self.file_create_empty(dest)\n if source_file.blocks:\n dest_file.blocks = source_file.blocks\n elif source_file.content:\n dest_file.content = source_file.content\n\n dest_file.save()\n return dest_file",
"def copy(self):\n return self.from_string(self.format(), self.filename, ignore_checksum=True)",
"def copy_to_se(self, src, dst, create_parent_directory=True):\n mgm, dst = self._safe_split_mgm(dst)\n dst = self._join_mgm_lfn(mgm, dst)\n if create_parent_directory:\n parent_directory = osp.dirname(dst)\n self.create_directory(parent_directory)\n logger.warning('Copying {0} to {1}'.format(src, dst))\n cmd = [ 'xrdcp', '-s', src, dst ]\n svj.core.utils.run_command(cmd)",
"def copy_tmp_file(self, dst):\n if dst and self.file_exists(self.tmp_file):\n shutil.copyfile(self.tmp_file, dst)",
"def clone(src: str, dst: str):\n if dst is None:\n dst = getcwd()\n destination = path.abspath(dst)\n # TODO: replace with false this is just for testing:\n makedirs(destination, exist_ok=True)\n\n sync_chunk(src, destination)\n copy(src, destination)",
"def copy(self, src_path: str, tgt_path: str) -> None:",
"def _prepare_dst_dir(self, dst, src=None, perm=None, **kwargs):\n if self.isdir(dst):\n full_dst = os.path.join(dst, os.path.basename(src)) if src else dst\n\n elif self.isfile(dst):\n full_dst = dst\n\n else:\n # interpret dst as a file name, create missing dirs\n dst_dir = self.dirname(dst)\n if dst_dir and self.create_file_dir and not self.isdir(dst_dir):\n self.mkdir(dst_dir, perm=perm, recursive=True)\n full_dst = dst\n\n return full_dst",
"def copy(self, destination):\n destination = Path(destination)\n src_base = str(self.directory)\n if self.flatten:\n dst_base = destination\n else:\n dst_base = Path(destination.joinpath(self.directory.stem))\n\n for src in self.locations_to_copy:\n if src.is_dir():\n for dir_path, dir_names, file_names in os.walk(str(src)):\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(dir_path.replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n for file in file_names:\n shutil.copy2(os.path.join(dir_path, file), str(dst_dir))\n else:\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(str(src.parent).replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n shutil.copy2(str(src), str(dst_dir))",
"def copy(self, fname):\n _, ext = osp.splitext(fname)\n spath = osp.join(self.src, fname)\n oname = fname\n path = osp.join(self.dst, oname)\n os.makedirs(osp.dirname(path), exist_ok=True)\n if ext in [\".css\"]:\n content = self.include(fname)\n with open(path, \"wt\") as fp:\n fp.write(content)\n else:\n shutil.copyfile(spath, path)\n return osp.relpath(oname, self.root)",
"def run_copy(self, src, dst):\n pass",
"def copy(src_path, dst_path, src_fs=None, dst_fs=None, overwrite=False):\n src_path = MPath.from_inp(src_path, fs=src_fs)\n dst_path = MPath.from_inp(dst_path, fs=dst_fs)\n\n if not overwrite and dst_path.fs.exists(dst_path):\n raise IOError(f\"{dst_path} already exists\")\n\n # create parent directories on local filesystems\n dst_path.parent.makedirs()\n\n # copy either within a filesystem or between filesystems\n if src_path.fs == dst_path.fs:\n src_path.fs.copy(str(src_path), str(dst_path))\n else:\n # read source data first\n with src_path.open(\"rb\") as src:\n content = src.read()\n # only write to destination if reading source data didn't raise errors,\n # otherwise we can end up with empty objects on an object store\n with dst_path.open(\"wb\") as dst:\n dst.write(content)",
"def copyanything(src, dst):\n try:\n copytree(src, dst, dirs_exist_ok=True)\n except FileExistsError as e: # noqa\n pass\n except OSError as err:\n # TODO(dittrich): This causes a pylint error\n # Not sure what test cases would trigger this, or best fix.\n if err.errno == os.errno.ENOTDIR: # type: ignore\n copy(src, dst)\n else:\n raise\n finally:\n remove_other_perms(dst)",
"def CopyPath(options, src, dst):\n if options.includes:\n if not IncludeFiles(options.includes, [src]):\n return\n\n if options.excludes:\n if not ExcludeFiles(options.excludes, [src]):\n return\n\n if options.verbose:\n print('cp %s %s' % (src, dst))\n\n # If the source is a single file, copy it individually\n if os.path.isfile(src):\n # We can not copy over a directory with a file.\n if os.path.exists(dst):\n if not os.path.isfile(dst):\n msg = \"cp: cannot overwrite non-file '%s' with file.\" % dst\n raise OSError(msg)\n # If the destination exists as a file, remove it before copying to avoid\n # 'readonly' issues.\n os.remove(dst)\n\n # Now copy to the non-existent fully qualified target\n shutil.copy(src, dst)\n return\n\n # Otherwise it's a directory, ignore it unless allowed\n if os.path.isdir(src):\n if not options.recursive:\n print(\"cp: omitting directory '%s'\" % src)\n return\n\n # We can not copy over a file with a directory.\n if os.path.exists(dst):\n if not os.path.isdir(dst):\n msg = \"cp: cannot overwrite non-directory '%s' with directory.\" % dst\n raise OSError(msg)\n else:\n # if it didn't exist, create the directory\n os.makedirs(dst)\n\n # Now copy all members\n for filename in os.listdir(src):\n srcfile = os.path.join(src, filename)\n dstfile = os.path.join(dst, filename)\n CopyPath(options, srcfile, dstfile)\n return",
"def copy_from(self, file_name, from_dir):\n raise NotImplementedError"
] | [
"0.6731077",
"0.6349671",
"0.61228085",
"0.6079704",
"0.60696405",
"0.6063155",
"0.6027516",
"0.6027516",
"0.59982765",
"0.5988888",
"0.5988461",
"0.59474134",
"0.5924743",
"0.5887651",
"0.58503205",
"0.5818022",
"0.5792588",
"0.5786693",
"0.57735986",
"0.5770276",
"0.5768486",
"0.5752785",
"0.57313937",
"0.56827676",
"0.55930185",
"0.55661523",
"0.5552814",
"0.5514974",
"0.55059433",
"0.5484279"
] | 0.6634319 | 1 |
Building paths using conventional POSIX systems will discard CWD if the path is absolute. FarmFS makes passing of CWD explicit so that path APIs are pure functions. Additionally, FarmFS path construction doesn't allow for absolute paths to be mixed with frames. This is useful for spotting bugs and making sure that pathing has strong guarantees. However, this comes at the expense of user expectation. When dealing with user input, there is an expectation that POSIX semantics are at play. userPath2Path checks to see if the provided path is absolute, and if not, adds the CWD frame. | def userPath2Path(arg, frame):
arg = ingest(arg)
if isabs(arg):
return Path(arg)
else:
return Path(arg, frame) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_path(path: Union[Path, str], path_is_absolute: bool = False) -> Path:\n if not path_is_absolute:\n return Path(os.getcwd()) / path\n if isinstance(path, str):\n return Path(path)\n return path",
"def makePath(path):\n\n compatPath = os.path.abspath(os.path.expanduser(path))\n\n return compatPath",
"def _init_path(path: Union[str, PurePath]) -> Path:\n if not isinstance(path, Path):\n path = Path(path)\n\n path = path.expanduser()\n return path",
"def path_creator(rel_path=''):\n if platform.system() != 'Windows':\n if rel_path == '':\n path_list=sys.argv[0].split('/')[:-1]\n return '/'.join(path_list)\n else:\n path_list = sys.argv[0].split('/')[:-1]\n return '/'.join(path_list) + '/' + rel_path\n else:\n if rel_path == '':\n path_list=sys.argv[0].split('\\\\')[:-1]\n path_res='\\\\'.join(path_list)\n return path_res\n else:\n path_list = sys.argv[0].split('\\\\')[:-1]\n rel_path=rel_path.split('/')\n path_res='\\\\'.join(path_list) + '\\\\' + '\\\\'.join(rel_path)\n return path_res",
"def format_path (in_path):\n return os.path.realpath(os.path.expanduser(in_path))",
"def convert_path(pathname):\n if os.sep == '/':\n return pathname\n if not pathname:\n return pathname\n if pathname[0] == '/':\n raise ValueError(\"path '%s' cannot be absolute\" % pathname)\n if pathname[-1] == '/':\n raise ValueError(\"path '%s' cannot end with '/'\" % pathname)\n\n paths = pathname.split('/')\n while os.curdir in paths:\n paths.remove(os.curdir)\n if not paths:\n return os.curdir\n return os.path.join(*paths)",
"def from_cwd(root, path):\n return normpath(join(root, normpath(path)))",
"def fix_path(path):\n return os.path.abspath(os.path.expanduser(path))",
"def validated_path(basepath, env = None, *path):\n if basepath is not None:\n result = os.path.realpath(os.path.join(os.path.expanduser(basepath), *path))\n\n if env is not None and not os.path.isdir(result):\n env.warn(result + ' not found.')\n\n return result\n else:\n raise ValueError",
"def _fixpath(p):\n return os.path.abspath(os.path.expanduser(p))",
"def get_path(path):\n if _prefix and not '/' in path:\n path = _prefix + path\n\n if not _cwd:\n return path\n\n return join(_cwd, path)",
"def build_path(*path_elements):\n return path.abspath(path.expanduser(path.join(*path_elements)))",
"def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n # Don't forget explicit trailing slash when normalizing. Issue17324\n trailing_slash = path.rstrip().endswith('/')\n path = posixpath.normpath(urllib.unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = self.working_dir\n for word in words:\n _drive, word = os.path.splitdrive(word)\n _head, word = os.path.split(word)\n if word in (os.curdir, os.pardir):\n continue\n path = os.path.join(path, word)\n if trailing_slash:\n path += '/'\n return path",
"def pwd_expanduser ( fspath, uid ):\n if not fspath or fspath[0] != '~':\n return fspath\n elif len ( fspath ) < 2:\n return get_home_dir ( uid )\n elif fspath[1] == os.sep:\n return get_home_dir ( uid ) + fspath[1:]\n else:\n return fspath",
"def _normalize_path(path):\n if path is None:\n return None\n return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))",
"def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))",
"def normalizePath(p: str, *pathParts: List[str]) -> str:\n p1 = os.path.abspath(os.path.expanduser(p))\n if len(pathParts)>0:\n allPathParts = [ p1 ]\n allPathParts.extend(pathParts)\n p1 = os.path.join(*allPathParts)\n p2 = os.path.abspath(p1)\n return p2",
"def make_path_safe(path):\n if path is not None:\n return os.path.abspath(os.path.expanduser(path))\n else:\n return None",
"def _norm_path(filepath):\n return Path(os.path.abspath(os.path.normpath(\n os.path.expandvars(os.path.expanduser(str(filepath))))))",
"def makepath(path):\r\n from os import makedirs\r\n from os.path import normpath, dirname, exists, abspath\r\n\r\n dpath = normpath(dirname(path))\r\n if not exists(dpath): makedirs(dpath)\r\n return normpath(abspath(path))",
"def ensure_path(path):\n\n path = os.path.expanduser(path)\n #Do not take into consideration the last path element\n #Unless it end with '/'\n os.makedirs('/'.join(path.split('/')[:-1]), exist_ok=True)\n return path",
"def _path(unix_path):\n return unix_path.replace(\"/\", os.path.sep)",
"def completePath(path):\n return os.getcwd() + convertString(path)",
"def translate_path(self, path):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n # Don't forget explicit trailing slash when normalizing. Issue17324\n trailing_slash = path.rstrip().endswith('/')\n try:\n path = urllib.parse.unquote(path, errors='surrogatepass')\n except UnicodeDecodeError:\n path = urllib.parse.unquote(path)\n path = posixpath.normpath(path)\n words = path.split('/')\n words = filter(None, words)\n path = os.getcwd()\n for word in words:\n if os.path.dirname(word) or word in (os.curdir, os.pardir):\n # Ignore components that are not a simple file/directory name\n continue\n path = os.path.join(path, word)\n if trailing_slash:\n path += '/'\n return path",
"def abspath(path):\n if not os.path.isabs(path):\n cwd = os.getcwdu()\n path = os.path.join(cwd, path)\n return os.path.normpath(path)",
"def normalizePath(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n buff = '/' + path if path[0] != '/' else path\n return buff.replace('//', '/')",
"def translate_path(path, workdir):\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = urllib.unquote(path)\n parts = path.split('/')\n return os.path.join(workdir, *parts)",
"def pretty_path(input_path):\n home_path = os.path.expanduser('~')\n cwd_path = os.getcwd()\n output_path = input_path.replace(home_path, '~').replace(cwd_path, './')\n return output_path",
"def abspath(fpath):\n from os import path, getcwd, chdir\n original = getcwd()\n chdir(reporoot)\n result = path.abspath(path.expanduser(fpath))\n chdir(original)\n return result",
"def make_local_path(self, *args):\n return os.path.normpath(os.path.join(\n os.path.dirname(api.env.real_fabfile), *args).rstrip(os.path.sep))"
] | [
"0.7044446",
"0.6547335",
"0.64296883",
"0.6427195",
"0.6424488",
"0.6364664",
"0.62077975",
"0.61788577",
"0.6155652",
"0.60604566",
"0.6044922",
"0.6038902",
"0.60300344",
"0.6017157",
"0.59847945",
"0.5975106",
"0.593022",
"0.5901871",
"0.5887952",
"0.5885305",
"0.5880734",
"0.58711845",
"0.5855342",
"0.58081406",
"0.57841796",
"0.57786435",
"0.5761267",
"0.5754057",
"0.5717527",
"0.56995517"
] | 0.67894834 | 1 |
Returns a list of objects not selected in the view. | def get_non_selected(self):
obj_list = self.get_list()
for sel in self.get_selected():
obj_list.remove(sel)
return obj_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def non_hidden(self):\n return self.filter(hidden=False)",
"def non_hidden(self):\n return self.filter(hidden=False)",
"def get_selected_items(self):\n\n datas = [i.data(NODEROLE) for i in self.view.get_indices()]\n items = [d for d in datas if d is not None] # filter Nones\n\n return items",
"def get_non_selected_node_items(self, nodetype=node.RigNode):\n items = self.selectedItems()\n nodeitems = list()\n non_selected_items = [item for item in self.items() if item not in items]\n for non_selected_item in non_selected_items:\n if isinstance(non_selected_item, nodetype):\n nodeitems.append(non_selected_item)\n # end for non_selected_item in non_selected_items\n return nodeitems",
"def get_inititially_selected_queryset(self):\n return self.model.objects.none()",
"def get_unlabelled_documents_queryset(self):\n queryset = self.get_queryset()\n\n # Retrieve labelled IDs\n labelled_ids = self.get_labelled_documents_queryset()\\\n .values_list('document_id', flat=True)\n\n return queryset.exclude(pk__in=labelled_ids)",
"def excluded(cls):\n return []",
"def get_unselected_benefits(cls, excluded_benefits):\n benefits = cls.query.filter(cls.id.notin_(excluded_benefits))\n return [benefit.serialize() for benefit in benefits]",
"def get_non_inheriting_objects(self):\n return get_non_inheriting_objects(self)",
"def non_archived_tags(self):\n non_archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.non_archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=non_archived_tagged_traits.values_list('tag__pk', flat=True))",
"def dislikes(self):\n return self.get_queryset().filter(vote__lt=0)",
"def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]",
"def exclude_list(self):\n pass",
"def exclude_object(qs, obj):\n return qs.exclude(pk=obj.pk)",
"def get_queryset(self):\n return self._get_base_queryset().filter(deleted__isnull=True)",
"def not_use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i excluded\" % frame_selected)\n item.setBackground(self.background_excluded)\n item.setForeground(QtGui.QColor(255, 255, 255))\n self.index_included[index_selected] = False\n self.frame_selector.setPhoto(self.frame_index)",
"def filter_selected_nodes(tree) -> list:\n return [n for n in tree.nodes if n.select and n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}]",
"def get_queryset(self):\n self.object = self.get_object()\n return self.object.friends.all().exclude(user=self.object.user)",
"def __editDeselectAll(self):\n self.activeWindow().selectAll(False)",
"def getnegates(self):\n from beliefSys import beliefSys as bs\n return bs.negatelist",
"def get_queryset(self, request):\n query = super(GipsyMenu, self).get_queryset(request)\n return query.filter(parent__isnull=True)",
"def get_selected_items(self):\n\n selection_model = self.view.selectionModel()\n items = [row.data(NODEROLE) for row in selection_model.selectedRows(0)]\n\n return items",
"def visible(self, **kwargs):\r\n return self.filter(is_deleted=False, **kwargs)",
"def _get_empty_page(self):\n return self._slice_objects(0, 0)",
"def getAllUnpublishedObjects(self, resource='objects/unpublished'):\n\n objects = list()\n\n for item in self.iterateAllPaginated(resource, vsdModels.APIObject):\n obj = self.getObject(item.selfUrl)\n objects.append(obj)\n return objects",
"def EmptyTarget(self):\n return not self.objects",
"def unisolvent_nodes(self):\r\n return self.grid.unisolvent_nodes",
"def invert_selection(self):\n pass",
"def get_list_if_visible(self, selector, no_highlight=False):\n l = self.get_list(selector, no_highlight=no_highlight)\n return [e for e in l if e.is_displayed()]",
"def unlabeled_set(self):\n # unlabeled set is the query set minus the preselected set\n unlabeled_tag_bitmask = self._query_tag_bitmask - self._preselected_tag_bitmask\n return unlabeled_tag_bitmask.masked_select_from_list(\n self.api_workflow_client.filenames_on_server\n )"
] | [
"0.7010305",
"0.7010305",
"0.67899483",
"0.6676391",
"0.6551073",
"0.6375752",
"0.6311826",
"0.61739516",
"0.61335963",
"0.5950215",
"0.59168136",
"0.59028524",
"0.58810467",
"0.5845068",
"0.58445495",
"0.580032",
"0.5723122",
"0.5704929",
"0.5687304",
"0.5663265",
"0.56493396",
"0.5642583",
"0.56147635",
"0.56000465",
"0.5556039",
"0.55541843",
"0.5532388",
"0.55158246",
"0.5513992",
"0.5503332"
] | 0.8311188 | 0 |
This is a special string; when received, it will make all Menu > Objects entries unchecked. It means we clicked outside of the items and deselected them all. | def on_row_selected(self, obj_name):
if obj_name == 'none':
for act in self.app.ui.menuobjects.actions():
act.setChecked(False)
return
# get the name of the selected objects and add them to a list
name_list = []
for obj in self.get_selected():
name_list.append(obj.options['name'])
# set all actions as unchecked but the ones selected make them checked
for act in self.app.ui.menuobjects.actions():
act.setChecked(False)
if act.text() in name_list:
act.setChecked(True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generateMenuItemCheckedState(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'checkbox'\n indicators = self._script.formatting.getString(**args)\n if obj.getState().contains(pyatspi.STATE_CHECKED):\n result.append(indicators[1])\n return result",
"def toggleSelection(self):\n for item in self.getItemsToModify():\n checked = item.checkState() == Qt.Checked\n item.setCheckState(Qt.Unchecked if checked else Qt.Checked)",
"def toggleSelection(self):\n for item in self.getItemsToModify():\n checked = item.checkState() == Qt.Checked\n item.setCheckState(Qt.Unchecked if checked else Qt.Checked)",
"def _uncheck_all(self):\n for item in self.list_cb_data.values():\n data_ctrl, _, _, _, _, _, _, _ = item\n self.tree_ctrl.CheckItem(data_ctrl, False)\n self.enable_append()\n self.enable_freeze()\n self.enable_plot()\n self.enable_import()\n self.enable_remove()",
"def sgnDoubleClickInList(self):\n\n self.uiSwitchSelectedCheckStateInList()",
"def delete_menu():",
"def deSelect(self):\n for i in range(len(self.__controlsChecks)):\n self.__controlsChecks[i].setChecked(False)",
"def on_unselected(self):\n self.colour = self.normal_colour\n self.is_selected = False\n self.redraw()",
"def not_use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i excluded\" % frame_selected)\n item.setBackground(self.background_excluded)\n item.setForeground(QtGui.QColor(255, 255, 255))\n self.index_included[index_selected] = False\n self.frame_selector.setPhoto(self.frame_index)",
"def stateChanged(self, obj, box):\n logger.debug(\"checkbox state changed\")\n if(box.isChecked()==False):\n logger.debug(\"deselect: %s\" % obj)\n cmds.select(obj, d=True) #deselect object\n else:\n logger.debug(\"%s is checked\" % obj)",
"def __editDeselectAll(self):\n self.activeWindow().selectAll(False)",
"def deselect_me(self):\r\n\t\tself.active = False",
"def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)",
"def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)",
"def __clear(self):\n for i in range(len(self.buttons_list)):\n self.labels_strvar[i].set(\"\")\n if self.buttons_list[i][\"state\"] == DISABLED:\n self.buttons_list[i][\"state\"] = NORMAL\n self.entered_list = []\n return",
"def hook_frame_unselected(self):",
"def text_select_context_menu_click (selectedtext) :\n\tsettings = Composition.CompositionManager.Get[Interfaces.Settings.IApplicationSettingsProvider]()\n\tsettings.GlobalExclude.Add(selectedtext)",
"def select_deselect_suspect_flag_ignored_checkbox(self):\n self.click_element(self.suspect_flags_ignored_checkbox_locator, True)",
"def get_unchecked_labels(self):\r\n unchecked_labels = []\r\n item_count = self.count()\r\n if item_count < 1:\r\n return unchecked_labels\r\n\r\n for item_index in xrange(item_count):\r\n item = self.item(item_index)\r\n if item is None or item.checkState() != Qt.Unchecked:\r\n continue\r\n unchecked_labels.append(str(item.text()))\r\n return unchecked_labels",
"def remove_checks(self):\n for checkbox in self.checkboxes:\n checkbox.setChecked(False)\n mw.checked_stats = []\n mw.bonuses = {'Charisma': 2}",
"def on_CheckPunish_clicked(self):\n # TODO: not implemented yet\n raise NotImplementedError",
"def onClickCheckbutton(self):\r\n self.app.unbind()\r\n mask = []\r\n for val in self.intvars:\r\n mask.append(val.get())\r\n # Recreate fNIRS Channels with channel mask\r\n self.app.reconfigureChannels(self.app.dataPath,mask)\r\n self.app.bindHotkeys()",
"def disable_not_selected(self, window, values, branch_log_dict, key_event):\n #we need to convert values[element] into the numeric\n #could used deepcopy, but we do not actually need it\n utils.convert_to_numeric(values)\n key_set = set(branch_log_dict[key_event].keys())\n for key in key_set.difference(set([values[key_event]])):\n for element_key in branch_log_dict[key_event][key]:\n if not isinstance(window[element_key], sg.Text) and not isinstance(window[element_key], sg.Listbox):\n window[element_key].update(disabled = True)\n window[element_key].update(value = \"\")\n window[element_key].metadata = False\n window[element_key+\"_label\"].update(text_color = \"#000000\")#every non-text field has a label\n window[element_key].update(visible = False)",
"def setSelected(*args):",
"def setSelected(*args):",
"def DoCheck(self,event):\r\n index = event.GetSelection()\r\n item = self.items[index]\r\n if self.list.IsChecked(index):\r\n self.data.check(item)\r\n else:\r\n self.data.uncheck(item)\r\n #self.list.SetSelection(index)\r",
"def onClearMenu(self, item):\n self.canvas.clear()\n return 1",
"def remove_menu(menu_name):\n\n pass",
"def changeEnableMenus(self, fileObj):\n logging.debug(\"Changing menus...\" + str(fileObj.autoPersonalCleaned))\n self.actionCheck_Content.setEnabled(True)\n self.actionCheck_Content_auto.setEnabled(not fileObj.autoPersonalCleaned)\n self.actionCheck_Content_onebyone.setEnabled(not fileObj.autoPersonalCleaned)\n self.actionCheck_Content_useblur.setEnabled(not fileObj.autoPersonalCleaned)\n\n self.actionCheck_Metadata.setEnabled(True)\n self.actionCheck_Metadata_keep.setEnabled(not fileObj.autoMetaCleaned)\n self.actionCheck_Metadata_recon.setEnabled(not fileObj.reconMetaCleaned)",
"def isSelected(*args):"
] | [
"0.605545",
"0.60228664",
"0.60228664",
"0.5937537",
"0.5895686",
"0.5885168",
"0.58458877",
"0.57850504",
"0.5775188",
"0.5724976",
"0.5644342",
"0.5637988",
"0.5612422",
"0.5612422",
"0.5608615",
"0.55828875",
"0.55805236",
"0.5510928",
"0.5507137",
"0.5500436",
"0.54773974",
"0.5464156",
"0.54450107",
"0.540008",
"0.540008",
"0.53876495",
"0.53665227",
"0.5356882",
"0.53556573",
"0.5353439"
] | 0.6350823 | 0 |
Run the miraligner tool (from the seqcluster suite) with default parameters. | def _miraligner(fastq_file, out_file, species, db_folder, config):
resources = config_utils.get_resources("miraligner", config)
miraligner = config_utils.get_program("miraligner", config)
jvm_opts = "-Xms750m -Xmx4g"
if resources and resources.get("jvm_opts"):
jvm_opts = " ".join(resources.get("jvm_opts"))
export = _get_env()
cmd = ("{export} {miraligner} {jvm_opts} -freq -sub 1 -trim 3 -add 3 -s {species} -i {fastq_file} -db {db_folder} -o {tx_out_file}")
if not file_exists(out_file + ".mirna"):
with file_transaction(out_file) as tx_out_file:
do.run(cmd.format(**locals()), "Do miRNA annotation for %s" % fastq_file)
shutil.move(tx_out_file + ".mirna", out_file + ".mirna")
return out_file + ".mirna" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def miraligner(args):\n hairpin, mirna = _download_mirbase(args)\n precursors = _read_precursor(args.hairpin, args.sps)\n matures = _read_mature(args.mirna, args.sps)\n gtf = _read_gtf(args.gtf)\n out_dts = []\n out_files = []\n for bam_fn in args.files:\n sample = op.splitext(op.basename(bam_fn))[0]\n logger.info(\"Reading %s\" % bam_fn)\n if bam_fn.endswith(\"bam\") or bam_fn.endswith(\"sam\"):\n bam_fn = _sam_to_bam(bam_fn)\n bam_sort_by_n = op.splitext(bam_fn)[0] + \"_sort\"\n pysam.sort(\"-n\", bam_fn, bam_sort_by_n)\n reads = _read_bam(bam_sort_by_n + \".bam\", precursors)\n elif bam_fn.endswith(\"fasta\") or bam_fn.endswith(\"fa\") or \\\n bam_fn.endswith(\"fastq\"):\n if args.collapse:\n bam_fn = _collapse_fastq(bam_fn)\n out_file = op.join(args.out, sample + \".premirna\")\n bam_fn = _filter_seqs(bam_fn)\n if args.miraligner:\n _cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out)\n reads = _read_miraligner(out_file)\n out_files.append(out_file)\n else:\n raise ValueError(\"Format not recognized.\")\n\n if args.miraligner:\n _mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out)\n\n if not args.miraligner:\n reads = _annotate(reads, matures, precursors)\n\n out_file = op.join(args.out, sample + \".mirna\")\n out_file, dt, dt_pre = _tab_output(reads, out_file, sample)\n try:\n vcf_file = op.join(args.out, sample + \".vcf\")\n if not file_exists(vcf_file):\n # if True:\n create_vcf(dt_pre, matures, gtf, vcf_file)\n try:\n import vcf\n vcf.Reader(filename=vcf_file)\n except Exception as e:\n logger.warning(e.__doc__)\n logger.warning(e)\n except Exception as e:\n # traceback.print_exc()\n logger.warning(e.__doc__)\n logger.warning(e)\n if isinstance(dt, pd.DataFrame):\n out_dts.append(dt)\n\n if out_dts:\n _create_counts(out_dts, args.out)\n else:\n print(\"No files analyzed!\")",
"def _cmd_miraligner(fn, out_file, species, hairpin, out):\n tool = _get_miraligner()\n path_db = op.dirname(op.abspath(hairpin))\n cmd = \"{tool} -freq -i {fn} -o {out_file} -s {species} -db {path_db} -sub 1 -trim 3 -add 3\"\n if not file_exists(out_file):\n logger.info(\"Running miraligner with %s\" % fn)\n do.run(cmd.format(**locals()), \"miraligner with %s\" % fn)\n shutil.move(out_file + \".mirna\", out_file)\n return out_file",
"def main(argv):\r\n\r\n mapperAbbrs = {'C':'cushaw', 'S':'shrimp', 'B':'bfast', 'W':'bwa-mem', 'N':'novoalign'}\r\n\r\n #Dictionary of commands to use for various mappers - configure your mapper commands here\r\n aligner_dict = {\r\n\t'B,CS,S':[\r\n\t\t'bfast fasta2brg -f DDiFasta -A 0',\r\n\t\t'bfast fasta2brg -f DDiFasta -A 1',\r\n\t\t'bfast index -f DDiFasta -m 1111111111111111111111 -w 14 -i 1 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111110100111110011111111111 -w 14 -i 2 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 10111111011001100011111000111111 -w 14 -i 3 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111100101111000001100011111011 -w 14 -i 4 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111110001111110011111111 -w 14 -i 5 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 11111011010011000011000110011111111 -w 14 -i 6 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111111110011101111111 -w 14 -i 7 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111011000011111111001111011111 -w 14 -i 8 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1110110001011010011100101111101111 -w 14 -i 9 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111001000110001011100110001100011111 -w 14 -i 10 -A 1 -n DDiProcs',\r\n\t\t'bfast match -f DDiFasta -A 1 -i 1-10 -k 18 -K 100000 -w 0 -t -n DDiProcs -Q 100000 -l -r DDiFastq1 > DDiBMF',\r\n\t\t'bfast localalign -f DDiFasta -m DDiBMF -A 1 -n DDiProcs -U -q 20 -Q 100000 -t > DDiBAF',\r\n\t\t'rm DDiBMF',\r\n\t\t'bfast postprocess -f DDiFasta -i DDiBAF -o DDiAligned -O 1 -a 3 -z -n DDiProcs -q 20 -Q 100000 -t > DDiSAM',\r\n\t\t'rm DDiBAF'\r\n\t ],\r\n 'C,CS,S':[\r\n 'cushaw3 index DDiFasta -c -p bwtindex',\r\n 'cushaw3 calign -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,S':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,P':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -q DDiFastq1 DDiFastq2 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'S,CS,S':[\r\n 'gmapper-cs -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,S':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,P':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts -1 DDiFastq1 -2 DDiFastq2 DDiFasta > DDiSAM'\r\n ],\r\n\t'W,NT,S':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 > DDiSAM'\r\n ],\r\n\t'W,NT,P':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 DDiFastq2 > DDiSAM'\r\n ],\r\n\t'N,NT,S':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM -d DDiNIX -f DDiFastq1 > DDiSAM'\r\n ],\r\n\t'N,NT,P':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM NovoOpts -d DDiNIX -f DDiFastq1 DDiFastq2 > DDiSAM'\r\n ]\r\n }\r\n\r\n #Arguments that are required\r\n required = ['fastqFiles', 'mappingRefSeqFiles', 'outputDir']\r\n\r\n parser = argparse.ArgumentParser(description='Iteratively calls 3rd party mappers and DDiMap executable')\r\n\r\n #Argument options\r\n parser.add_argument('-q', type=str, metavar='file', nargs='+', help='list of fastq files', dest='fastqFiles')\r\n parser.add_argument('-r', type=str, metavar='file', nargs='+', help='list of files to use for reference sequences', 
dest='mappingRefSeqFiles')\r\n parser.add_argument('-j', type=str, metavar='file', nargs='+', help='list of files to use for junctions', dest='junctionRefSeqFiles')\r\n parser.add_argument('-o', type=str, metavar='directory', help='output directory', dest='outputDir')\r\n \r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('-p', '--paired', action='store_true', help='fastq files have paired ends', dest='pairedEnds')\r\n group.add_argument('-s', '--single', action='store_false', help='fastq files have single ends', dest='pairedEnds')\r\n parser.add_argument('-n', type=int, metavar='cpus', help='number of processors to use', dest='nProcs')\r\n parser.add_argument('-c', type=str, metavar='config_file', help='location of config file', dest='configFile')\r\n parser.add_argument('-v', action='store_true', help='turns on verbosity', dest='verbose')\r\n\r\n parser.add_argument('--aligner_order', type=str, metavar='{'+','.join(mapperAbbrs.keys())+'}', help='mapper sequence as a string. ie CSC', dest='alignerOrder')\r\n parser.add_argument('--first_iter', metavar='n', type=int, help='first iteration', dest='firstIter')\r\n parser.add_argument('--max_iters', metavar='n', type=int, help='maximum iterations', dest='maxIters')\r\n parser.add_argument('--read_length', metavar='n', type=int, help='read length', dest='readLength')\r\n parser.add_argument('--read_type', type=str, help='read type', choices=['CS','NT'], dest='readType')\r\n parser.add_argument('--req_frag_conv', help='require frags to converge as well as SNVs', action='store_true', dest='reqFragConv')\r\n parser.add_argument('--no-req_frag_conv', help='does not require frags to converge as well as SNVs', action='store_false', dest='reqFragConv')\r\n\r\n parser.add_argument('--frag_maker_thresh',type=float, metavar='threshold', help='verified frag maker threshold', dest='fragMakerThresh')\r\n parser.add_argument('--frag_thresh', type=float, metavar='threshold', help='unverified frag maker threshold', dest='fragThresh')\r\n parser.add_argument('--min_absolute_cover', type=int, metavar='n', help='minimum absolute cover', dest='minAbsoluteCover')\r\n parser.add_argument('--snv_thresh', type=float, metavar='threshold', help='SNV threshold', dest='SNVthresh')\r\n parser.add_argument('--snv_type2_thresh', type=float, metavar='threshold', help='SNV type 2 threshold', dest='SNVtype2thresh')\r\n parser.add_argument('--snv_type3_thresh', type=float, metavar='threshold', help='SNV type 3 threshold', dest='SNVtype3thresh')\r\n parser.add_argument('--roa_size', type=int, metavar='size', help='Size to use for region of analysis in DDiMAP', dest='roaSize')\r\n\r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('--use_DI', action='store_true', help='use reads mapped with deletion and insertion', dest='useDI')\r\n group.add_argument('--no-use_DI', action='store_false', help='do not use reads mapped with deletion and insertion', dest='useDI')\r\n\r\n parser.add_argument('--cushaw_opts', type=str, metavar=\"'options'\", help='cushaw specific options', dest='cushawOpts')\r\n parser.add_argument('--shrimp_opts', type=str, metavar=\"'options'\", help='shrimp specific options', dest='shrimpOpts')\r\n parser.add_argument('--bwamem_opts', type=str, metavar=\"'options'\", help='bwa-mem specific options', dest='bwaMemOpts')\r\n parser.add_argument('--novo_opts', type=str, metavar=\"'options'\", help='novoalign specific options', dest='novoOpts')\r\n\r\n\r\n #Parse args and check for config file\r\n args = 
parser.parse_args()\r\n if args.configFile:\r\n configFile = args.configFile\r\n if not path.isfile(configFile):\r\n print 'config file specified, but not found'\r\n exit(1)\r\n else:\r\n configFile = 'DDiMap.cfg'\r\n\r\n #Read in settings from config file\r\n Settings = read_config(configFile)\r\n\r\n # Loop over each section and replace values with those passed in on command line. \r\n # Also create a local variable that matches the keys in the settings dictionary.\r\n\r\n for section in Settings.keys():\r\n for key in Settings[section].keys():\r\n if getattr(args, key):\r\n Settings[section][key] = getattr(args, key)\r\n exec '%s = Settings[section][key]' % key\r\n if key in required and not Settings[section][key]:\r\n print '%s not specified on command line or in config file. Aborting...' % key\r\n print Settings[section][key]\r\n parser.print_help()\r\n exit(1)\r\n if (type(Settings[section][key]) == list):\r\n Settings[section][key] = ', '.join(Settings[section][key])\r\n\r\n if useDI: # reads with CIGARs containing both I and D are processed\r\n kFlag='-k'\r\n else: # reads with CIGARs containing both I and D are not processed\r\n kFlag=''\r\n\r\n if pairedEnds:\r\n pair_str='P'\r\n else:\r\n pair_str='S'\r\n\r\n # do the work - set up for the iteration\r\n aligners = list(alignerOrder)\r\n iterMin = len(aligners)\r\n iterMax = max(maxIters, iterMin); # always do as many iters as are in alignerOrder string\r\n aligners = aligners + list(repeat(aligners[-1], iterMax - iterMin)) # define the aligner ID sequence to be used over the iterations\r\n\r\n\r\n # Make paths absolute\r\n fastqFiles = [path.abspath(x) for x in fastqFiles]\r\n mappingRefSeqFiles = [path.abspath(x) for x in mappingRefSeqFiles]\r\n junctionRefSeqFiles = [path.abspath(x) for x in junctionRefSeqFiles]\r\n outputDir = path.abspath(outputDir) + '/'\r\n\r\n # Make sure the output directory exists\r\n\r\n if not path.isdir(outputDir):\r\n makedirs(outputDir)\r\n\r\n # Write configuration file in outputDir\r\n write_config(outputDir, Settings)\r\n\r\n # INITIAL VALUES OF LOOP CONTROL PARAMETERS\r\n converged = False\r\n prevFragList = [] # this will be replaced by counts of fragments created for each baseline refernce sequence\r\n prevSNVList = [] # this will be replaced by counts of SNV candidates found for each baseline reference sequence\r\n\r\n thisIter = firstIter\r\n\r\n\r\n for RefSeqFile in fastqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find fastqFile at ' + RefSeqFile\r\n exit(1)\r\n\r\n # Delete old enhanced fast file if present. 
It should never be...\r\n\r\n enhancedFastaFile = outputDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): # see if one is already here - need to zap it\r\n remove(enhancedFastaFile) # remove if present because fastawrite appends to existing files\r\n output_handle = open(enhancedFastaFile, 'a')\r\n\r\n # Add reference sequences to file with _Ref tag\r\n RefSeqs=[]\r\n for RefSeqFile in mappingRefSeqFiles:\r\n\tprint 'ref seq file = ' + RefSeqFile\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n SeqIO.write(formattedRefSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n \r\n # Create junctions if they are needed and then add to ref seq file as mapping targets for chimeric reads\r\n RefSeqs=[]\r\n for RefSeqFile in junctionRefSeqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n junctionSeqs = make_junctions(formattedRefSeqs,readLength);\r\n SeqIO.write(junctionSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n\r\n # allows restarts\r\n if thisIter > 1: # there is no previous iteration, so start fresh\r\n prevWorkingDir = outputDir + ('Gen%d/' % (thisIter-1))\r\n for i in range(1, thisIter):\r\n prevWorkingDir = '%sGen%d/' % (outputDir, i) \r\n fragFile = prevWorkingDir + 'fasta.fa'\r\n snvFile = prevWorkingDir + 'snv.csv'\r\n ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv)\r\n\r\n\r\n while not converged and thisIter <= iterMax:\r\n \r\n print '======= Iteration %d of %d ========' % (thisIter, iterMax)\r\n\r\n # creates working dir if not present\r\n thisWorkingDir = outputDir + ('Gen%d/' % thisIter)\r\n if path.isdir(thisWorkingDir):\r\n rmtree(thisWorkingDir)\r\n makedirs(thisWorkingDir)\r\n \r\n # Delete old enhanced fast file if present. 
It should never be...\r\n enhancedFastaFile = thisWorkingDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): \r\n remove(enhancedFastaFile) \r\n copyfile(outputDir + 'refSeqEnhanced.fa', enhancedFastaFile)\r\n\r\n output_handle = open(enhancedFastaFile, 'a')\r\n \r\n # Append frags from previous iteration if any (these sequences are tagged as fragments when the file is written by DDiMAP)\r\n if (thisIter > 1):\r\n prevFragFile=prevWorkingDir + '/fasta.fa'\r\n if path.isfile(prevFragFile) and path.getsize(prevFragFile) > 0:\r\n fragSeqs=list(SeqIO.parse(prevFragFile, 'fasta'))\r\n SeqIO.write(fragSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n # Setup variables for aligner\r\n thisAligner=aligners[thisIter-1]\r\n thisAligned='DDiMAP_%s' % thisAligner\r\n \r\n if path.isfile(thisWorkingDir + 'mapper.log'):\r\n remove(thisWorkingDir + 'mapper.log')\r\n\r\n if not ','.join([thisAligner,readType,pair_str]) in aligner_dict.keys():\r\n print mapperAbbrs[thisAligner] + ' does not support ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n\r\n # execute commands for aligner\r\n\r\n open(thisWorkingDir + 'mapper.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'mapper.log'])\r\n\r\n # set substitutions for aligner commands\r\n commandsubs={'DDiFastq1':fastqFiles[0], \r\n 'DDiProcs':nProcs, \r\n 'DDiFasta':enhancedFastaFile, \r\n 'DDiBMF':thisAligned + '.bmf', \r\n 'DDiBAF':thisAligned + '.baf', \r\n 'DDiSAM':thisAligned + '.sam',\r\n 'DDiNIX':thisAligned + '.nix', \r\n 'DDiAligned':thisAligned, \r\n 'CushawOpts':cushawOpts, \r\n 'ShrimpOpts':shrimpOpts, \r\n 'BwaMemOpts':bwaMemOpts, \r\n 'NovoOpts':novoOpts}\r\n\r\n if (len(fastqFiles) > 1):\r\n commandsubs['DDiFastq2']=fastqFiles[1]\r\n\r\n for command in aligner_dict[','.join([thisAligner,readType,pair_str])]:\r\n cmdlist=re.split('\\s*',command)\r\n #remove empty arguments and subsitute in values from commandsubs \r\n args=filter(None,[str(commandsubs[x]) if x in commandsubs.keys() else x for x in cmdlist])\r\n args=re.split('\\s*',' '.join(args)) \r\n print ' '.join(args) # output actual command\r\n if 'DDiFastq2' in args: #This hasn't been substituted because one wasn't provided\r\n print mapperAbbrs[thisAligner] + ' expects 2 fastq files for use with ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n # Now we need to detect stdout redirection and do it properly using pOpen\r\n if '>' in args: \r\n i = args.index('>')\r\n outfile = args[i+1]\r\n del args[i:i+2]\r\n else:\r\n outfile = None\r\n \r\n log_file = open(thisWorkingDir + 'mapper.log', 'a')\r\n \r\n if (outfile):\r\n with open(thisWorkingDir + outfile, 'w') as output_file:\r\n a=Popen(args, cwd=thisWorkingDir, stdout=output_file, stderr=log_file)\r\n else:\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n\r\n success=a.wait()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** mapper exited with error', success\r\n print 'See ' + thisWorkingDir + 'mapper.log' + ' for more details'\r\n exit(success)\r\n\r\n if verbose:\r\n b.terminate()\r\n # Perform sam to bam conversion for DDiMap\r\n args=['samtools', 'view', '-b', '-S', '-o', thisAligned + '.bam', thisAligned + '.sam']\r\n print ' '.join(args) \r\n\r\n open(thisWorkingDir + 'samtools.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 
'samtools.log'])\r\n log_file = open(thisWorkingDir + 'samtools.log', 'w')\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n success=a.wait()\r\n log_file.close()\r\n if verbose:\r\n b.terminate()\r\n if not success == 0:\r\n print '*** samtools exited with error', success\r\n print 'See ' + thisWorkingDir + 'samtools.log' + ' for more details' \r\n exit(success)\r\n # remove the uncompressed sam file\r\n args=['rm', thisAligned + '.sam'];\r\n a=Popen(args, cwd=thisWorkingDir)\r\n\r\n # now run the DDiMAP code\r\n thisAlignedFile = thisWorkingDir + thisAligned + '.bam'\r\n args = (['DDiMAP', kFlag, '-r', roaSize, '-f', enhancedFastaFile, '-b', \r\n thisAlignedFile, '-c', minAbsoluteCover, '-n', fragThresh, '-a', \r\n fragMakerThresh, '-p', SNVthresh, '-s', SNVtype2thresh, '-l', \r\n SNVtype3thresh, '-o', thisWorkingDir])\r\n args = [str(x) for x in args]\r\n print ' '.join(args)\r\n open(thisWorkingDir + 'DDiMap.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'DDiMap.log'])\r\n log_file = open(thisWorkingDir + 'DDiMap.log', 'a')\r\n a = Popen(args, cwd=thisWorkingDir, stdout=log_file, stderr=log_file)\r\n success=a.wait()\r\n if verbose:\r\n b.terminate()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** DDiMap exited with error', success\r\n print 'See ' + thisWorkingDir + 'DDiMap.log' + ' for more details'\r\n exit(success)\r\n \r\n # now check for convergence\r\n \r\n fragFile = thisWorkingDir + 'fasta.fa'\r\n snvFile = thisWorkingDir + 'snv.csv'\r\n \r\n # call to the convergence test matlab function\r\n # result history kept in currFrags/prevFrags and currSNVs/prevSNVs\r\n \r\n if ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv):\r\n print 'Convergence found. Stopping...'\r\n break\r\n\r\n prevWorkingDir = thisWorkingDir; # all done with the previous, this will be the next iteration previous directory\r\n thisIter = thisIter+1\r\n else:\r\n print 'Failed to converge'\r\n\r\n print '%10s %10s %10s' % ('Iteration', 'nFrags', 'nSNVs')\r\n for i, (frags, snvs) in enumerate(zip(prevFragList, prevSNVList)):\r\n print '%10d %10d %10d' % (i+1, sum(frags), sum(snvs))\r\n\r\n # put final results into outputDir\r\n # make renamed copies of the final iteration result files, naming them using\r\n copyfile(thisWorkingDir+'fasta.fa',outputDir+'convergedFrags.fa')\r\n copyfile(thisWorkingDir+'dictionary.csv',outputDir+'convergedDictionary.csv')\r\n copyfile(thisWorkingDir+'snv.csv',outputDir+'convergedSNVs.csv')\r\n copyfile(thisWorkingDir+'coverage.csv',outputDir+'convergedCoverage.csv')\r\n copyfile(thisWorkingDir+'refSeqEnhanced.fa',outputDir+'convergedEnhancedRefSeqs.fa')",
"def main():\n # Get command line options\n args = get_args()\n\n # Set substitution matrix:\n if args.exchange_matrix == \"pam250\":\n exchangeMatrix = pam250\n elif args.exchange_matrix == \"blosum62\":\n exchangeMatrix = blosum62\n else:\n exchangeMatrix = identity\n\n # Read sequences from fasta file, and catch error reading file\n try:\n sequences = readSequences(open(args.fasta))\n except OSError as e:\n print(\"ERROR: cannot open or read fasta input file:\", e.filename)\n\n for seq in sequences:\n print(seq)\n\n # Call alignment routine(s):\n if args.align_global:\n alignment, score_matrix = do_global_alignment(\n sequences, exchangeMatrix, args.gap_penalty)\n elif args.align_local:\n alignment, score_matrix = do_local_alignment(\n sequences, exchangeMatrix, args.gap_penalty)\n elif args.align_semiglobal:\n alignment, score_matrix = do_semiglobal_alignment(\n sequences, exchangeMatrix, args.gap_penalty)\n else:\n sys.exit(\"BUG! this should not happen.\")\n \n\n # Print the result to files\n if args.alignment: \n print_alignment_to_file(alignment, args.alignment)\n if args.score_matrix:\n print_matrix_to_file(score_matrix, args.score_matrix)\n \n # Print the result on screen\n if args.print_on_screen:\n print_matrix_on_screen(alignment)\n print_matrix_on_screen(score_matrix)",
"def run(config=None):\n AlignmentWorkflow().run(config)",
"def align(args) :\n from aligner import align_reads\n align_reads(args)",
"def main(options):\n # Check if path to MIRZA is valid\n if not is_executable(options.mirzabin):\n raise Exception(\"Path to MIRZA is invalid (%s)! Please define it with --mirzabin option.\" % options.mirzabin)\n if options.verbose:\n syserr(\"Reading coordinate file\\n\")\n coords = read_coordinates(options.coords, True)\n\n if options.verbose:\n syserr(\"Reading mRNA sequences\\n\")\n mRNAseqs = read_fasta_to_dict(options.seq)\n\n if options.verbose:\n syserr(\"Reading miRNA sequences\\n\")\n miRNAseqs = read_fasta_to_dict(options.motifs)\n\n if options.onlymirza != 'yes':\n if options.verbose:\n syserr(\"Preparing alignments and phylogenetic tree\\n\")\n\n phylo_tree = read_phylogenetic_tree(options.tree)\n multiple_alignment_dict = read_multiple_alignments(phylo_tree,\n options.mln_dir,\n coords)\n mirhomologues = make_homologues_mirnas(phylo_tree, miRNAseqs)\n\n with gzip.open(options.out, 'wb') as outfile:\n if options.verbose:\n syserr(\"Collecting sequences\\n\")\n mRNA_sequences = [cor[-1] for cor in coords]\n mRNA_ids = [\"%s,%s,%s\" % (cor[0], cor[2], cor[3]) for cor in coords]\n number_of_coords = len(set([cor[1] for cor in coords])) == 1\n if number_of_coords > 1:\n raise Exception(\"More than mirna in coordinate file\")\n if number_of_coords == 0:\n syserr(\"There is no coordinates. Exit.\")\n sys.exit()\n\n miRNAseq = miRNAseqs[list(set([cor[1] for cor in coords]))[0]][:21]\n miRNAid = list(set([cor[1] for cor in coords]))[0]\n\n if options.verbose:\n syserr(\"Running MIRZA\\n\")\n results = calculate_mirza(mRNA_sequences, mRNA_ids, miRNAseq, miRNAid)\n\n if options.verbose:\n syserr(\"Collecting results\\n\")\n for key, group in itertools.groupby(results.splitlines(), lambda x: x == \"\"):\n if not key:\n proper_group = False\n for line in group:\n if line.startswith(\">\"):\n mRNAid = line.split()[0][1:].split(\",\")[0]\n beg = line.split()[0][1:].split(\",\")[1]\n end = line.split()[0][1:].split(\",\")[2]\n score = float(line.split()[-1])\n proper_group = True\n # elif line.startswith(\"miRNA\"):\n # mirhyb = line.split(\"\\t\")[1].split(\" \")[0]\n # elif line.startswith(\"A L\"):\n # hyb = line.split(\"\\t\")[1].rstrip()\n elif line.startswith(\"mRNA\"):\n mrhyb = line.split(\"\\t\")[1].split(\" \")[0]\n if proper_group:\n if len(miRNAseq) < 21:\n outtext = '%s,%s,%s,%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n' % (mRNAid,\n miRNAid,\n beg,\n end,\n \"NA\",\n \"NA\",\n \"NA\",\n \"NA\",\n \"NA\")\n outfile.write(outtext)\n continue\n\n # hybrids = [mirhyb, hyb, mrhyb]\n # mirseq, hybseq, mrhybseq, mrpos = get_hybrid_vector(hybrids)\n # canonical, type_of_site = is_canonical([mirseq, hybseq, mrhybseq])\n if options.onlymirza != 'yes':\n try:\n mln_frag = multiple_alignment_dict[mRNAid]\n qd = calculate_conservation(phylotree=phylo_tree,\n mrna_frag=mrhyb.replace(\"-\", \"\")[::-1],\n mrnaid=mRNAid,\n mirna=mirhomologues,\n mirname=miRNAid,\n mln_dict=mln_frag,\n ref_org=options.reforg,\n threshold=options.thr,\n mrna_len=options.contextLen)\n qd = str(qd)\n except KeyError, e:\n qd = \"NA\"\n sys.stderr.write(\"KeyError: \" + str(e) + \"\\n\")\n sys.stderr.write(\"Trace: \"\n + traceback.format_exc()\n + \"\\n\")\n # raise KeyError\n else:\n qd = \"NA\"\n outtext = '%s,%s,%s,%s\\t%f\\t%s\\n' % (mRNAid,\n miRNAid,\n beg,\n end,\n score,\n # \":\".join(hybrids),\n qd)\n # \"canonical\" if canonical else \"non-canonical\",\n # type_of_site)\n outfile.write(outtext)\n clean()",
"def novoalign(self) -> None:\n self.analysis.logger.info(\"Running alignment with NovoAlign\")\n self.chdir()\n config = self.analysis.config\n executor = Executor(self.analysis)\n barcoded = BarcodedFilename.from_sample(self.analysis.sample)\n with tempfile.TemporaryDirectory(dir=config.temporary_dir) as tmpdir:\n filename = os.path.join(tmpdir, \"align.log\")\n fh = logging.FileHandler(filename)\n self.analysis.logger.addHandler(fh)\n if barcoded.analyte == Analyte.WHOLE_EXOME:\n executor(\n f\"{config.novoalign} \"\n f'-oSAM \"@RG\\tID:{self.analysis.basename}\\t'\n f'SM:{self.analysis.sample}\\tLB:lib1\\tPL:ILLUMINA\" '\n f\"-d {{genome_index}} \"\n f\"-i PE {{kit.mean_len_library}},{{kit.sd_len_library}} \"\n f\"-t 90 -f {{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n elif barcoded.analyte == Analyte.GENE_PANEL:\n executor(\n f\"{config.novoalign} \"\n f\"-C \"\n f'-oSAM \"@RG\\tID:{self.analysis.basename}\\t'\n f'SM:{self.analysis.sample}\\tLB:lib1\\tPL:ILLUMINA\" '\n f\"-d {{genome_index}} \"\n f\"-i 50-500 -h 8 -H 20 --matchreward 3 -t 90 \"\n f\"-f {{input_filename}}> {{output_filename}}\",\n input_function=lambda l: \" \".join(sorted(l)),\n input_split_reads=False,\n output_format=f\"{self.analysis.basename}{{organism_str}}.sam\",\n split_by_organism=True,\n only_human=self.only_human,\n unlink_inputs=True,\n )\n else:\n raise Exception(\"Unnhandled analyte\")\n # CSV NOVOALIGN\n with open(filename, \"r\") as file_log, open(\n self.output_basename + \"_novoalign.csv\", \"w\"\n ) as csv_file, open(\n self.output_basename + \"_stat_novoalign.csv\", \"w\"\n ) as stat_csv_file:\n writer = csv.writer(csv_file)\n writer_stat = csv.writer(stat_csv_file)\n is_csv = False\n is_stat = False\n values = []\n labels = []\n for line in file_log:\n fields = line.split(\":\")\n label = fields[0][1:].strip()\n\n if is_stat is True:\n if label == \"No Mapping Found\":\n is_stat = False\n values.append(fields[1].strip().split()[0])\n labels.append(label)\n elif label == \"Paired Reads\":\n values.append(fields[1].strip().split()[0])\n labels.append(label)\n is_stat = True\n else:\n fields = line.split()\n if is_csv is True:\n if fields[1] == \"Mean\":\n break\n else:\n writer.writerow(fields[1:4])\n elif fields[1] == \"From\":\n writer.writerow(fields[1:4])\n is_csv = True\n writer_stat.writerow(labels)\n writer_stat.writerow(values)\n self.analysis.logger.removeHandler(fh)\n fh.close()\n self.analysis.logger.info(\"Alignment finished. Aligner used: NovoAlign\")",
"def exec_from_args(args):\n outfolder = args.folder + '/normal/'\n check(outfolder, 'm')\n\n makeconfig(str(args.gene_names), str(args.sequences), str(args.org_included),\n len_threshold=args.len_threshold,\n its=str(args.its), query_cover=str(args.query_cover), identity=str(args.identity),\n distance=str(args.string_distance), subsp=str(args.subsp), excluded=str(args.org_excluded),\n remote=str(args.remote_blast), folder=args.folder, date=args.today, blacklist=args.blacklist,\n synonyms=args.synonyms)\n\n r = Run('n', args.folder, args.debug)\n r.start()",
"def mcmc(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n\n if args.save or args.proceed:\n backend_loc = os.path.join(args.outputdir, conf_base+'_rawchain.h5')\n else:\n backend_loc = None\n\n status = load_status(statfile)\n P, post = radvel.utils.initialize_posterior(config_file,\n decorr=args.decorr)\n\n if status.getboolean('fit', 'run'):\n print(\"Loading starting positions from previous MAP fit\")\n\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n\n msg1 = (\n \"Running MCMC for {}, N_walkers = {}, N_steps = {}, N_ensembles = {}, Min Auto Factor = {}, \"\n ).format(conf_base, args.nwalkers, args.nsteps, args.ensembles, args.minAfactor)\n\n msg2 = (\n \"Max Auto Relative-Change = {}, Max G-R = {}, Min Tz = {} ...\"\n ).format(args.maxArchange, args.maxGR, args.minTz)\n\n print(msg1 + '\\n' + msg2)\n\n chains = radvel.mcmc(post, nwalkers=args.nwalkers, nrun=args.nsteps, ensembles=args.ensembles,\n minAfactor=args.minAfactor, maxArchange=args.maxArchange, burnAfactor=args.burnAfactor,\n burnGR=args.burnGR, maxGR=args.maxGR, minTz=args.minTz, minsteps=args.minsteps,\n minpercent=args.minpercent, thin=args.thin, serial=args.serial, save=args.save,\n savename=backend_loc, proceed=args.proceed, proceedname=backend_loc, headless=args.headless)\n\n mintz = statevars.mintz\n maxgr = statevars.maxgr\n minafactor = statevars.minafactor\n maxarchange = statevars.maxarchange\n\n # Convert chains into synth basis\n synthchains = chains.copy()\n for par in post.params.keys():\n if not post.vector.vector[post.vector.indices[par]][1]:\n synthchains[par] = post.vector.vector[post.vector.indices[par]][0]\n\n synthchains = post.params.basis.to_synth(synthchains)\n synth_quantile = synthchains.quantile([0.159, 0.5, 0.841])\n\n # Get quantiles and update posterior object to median\n # values returned by MCMC chains\n post_summary = chains.quantile([0.159, 0.5, 0.841])\n\n for k in chains.columns:\n if k in post.params.keys():\n post.vector.vector[post.vector.indices[k]][0] = post_summary[k][0.5]\n\n post.vector.vector_to_dict()\n\n print(\"Performing post-MCMC maximum likelihood fit...\")\n post = radvel.fitting.maxlike_fitting(post, verbose=False)\n\n final_logprob = post.logprob()\n final_residuals = post.likelihood.residuals().std()\n final_chisq = np.sum(post.likelihood.residuals()**2 / (post.likelihood.errorbars()**2))\n deg_of_freedom = len(post.likelihood.y) - len(post.likelihood.get_vary_params())\n final_chisq_reduced = final_chisq / deg_of_freedom\n post.vector.vector_to_dict()\n synthparams = post.params.basis.to_synth(post.params)\n\n print(\"Calculating uncertainties...\")\n post.uparams = {}\n post.medparams = {}\n post.maxparams = {}\n for par in synthparams.keys():\n maxlike = synthparams[par].value\n med = synth_quantile[par][0.5]\n high = synth_quantile[par][0.841] - med\n low = med - synth_quantile[par][0.159]\n err = np.mean([high, low])\n if maxlike == -np.inf and med == -np.inf and np.isnan(low) and np.isnan(high):\n err = 0.0\n else:\n err = radvel.utils.round_sig(err)\n if err > 0.0:\n med, err, errhigh = radvel.utils.sigfig(med, err)\n maxlike, err, errhigh = radvel.utils.sigfig(maxlike, err)\n post.uparams[par] = err\n post.medparams[par] = med\n post.maxparams[par] = maxlike\n\n print(\"Final loglikelihood = %f\" % final_logprob)\n print(\"Final RMS = %f\" % final_residuals)\n print(\"Final reduced chi-square = 
{}\".format(final_chisq_reduced))\n print(\"Best-fit parameters:\")\n print(post)\n\n print(\"Saving output files...\")\n saveto = os.path.join(args.outputdir, conf_base+'_post_summary.csv')\n post_summary.to_csv(saveto, sep=',')\n\n postfile = os.path.join(args.outputdir,\n '{}_post_obj.pkl'.format(conf_base))\n post.writeto(postfile)\n\n csvfn = os.path.join(args.outputdir, conf_base+'_chains.csv.bz2')\n chains.to_csv(csvfn, compression='bz2')\n\n auto = pd.DataFrame()\n auto['autosamples'] = statevars.autosamples\n auto['automin'] = statevars.automin\n auto['automean'] = statevars.automean\n auto['automax'] = statevars.automax\n auto['factor'] = statevars.factor\n autocorr = os.path.join(args.outputdir, conf_base+'_autocorr.csv')\n auto.to_csv(autocorr, sep=',')\n\n savestate = {'run': True,\n 'postfile': os.path.relpath(postfile),\n 'chainfile': os.path.relpath(csvfn),\n 'autocorrfile': os.path.relpath(autocorr),\n 'summaryfile': os.path.relpath(saveto),\n 'nwalkers': statevars.nwalkers,\n 'nensembles': args.ensembles,\n 'maxsteps': args.nsteps*statevars.nwalkers*args.ensembles,\n 'nsteps': statevars.ncomplete,\n 'nburn': statevars.nburn,\n 'minafactor': minafactor,\n 'maxarchange': maxarchange,\n 'minTz': mintz,\n 'maxGR': maxgr}\n save_status(statfile, 'mcmc', savestate)\n\n statevars.reset()",
"def align(filename, prog, outfile):\n ra = RunAlign()\n ra.run_align(filename, prog, outfile)",
"def run_mrtrans(align_fasta, rec_1, rec_2, work_dir):\n align_file = op.join(work_dir, \"prot-align.fasta\")\n nuc_file = op.join(work_dir, \"nuc.fasta\")\n output_file = op.join(work_dir, \"nuc-align.mrtrans\")\n\n # make the protein alignment file\n align_h = open(align_file, \"w\")\n align_h.write(str(align_fasta))\n align_h.close()\n # make the nucleotide file\n SeqIO.write((rec_1, rec_2), file(nuc_file, \"w\"), \"fasta\")\n\n # run the program\n cl = MrTransCommandline(align_file, nuc_file, output_file)\n r, e = cl.run()\n if e is None:\n print >>sys.stderr, \"\\tpal2nal:\", cl\n return output_file\n elif e.read().find(\"could not translate\") >= 0:\n print >>sys.stderr, \"***pal2nal could not translate\"\n return None",
"def main():\n if Args['clean']:\n print(f\"Clear the temporary output directory {Args['tmpdir']}\",\n file=sys.stderr)\n subprocess.run(f\"rm -r {Args['tmpdir']}\", shell=True)\n sys.exit(0)\n\n # Configure snakemake execution\n print(\"Configure Snakemake for execution.\")\n os.makedirs(Args['tmpdir'], exist_ok=True)\n config.argparse_to_json(Args)\n if Args['snakemakedir'] is None:\n Args['snakemakedir'] = os.path.dirname(os.path.realpath(__file__)) + \"/snakemake\"\n if Args['clusterconfig_template'] is None:\n Args['clusterconfig_template'] = Args['snakemakedir'] + \"/cluster_config_template.json\"\n config.argparse_to_clusterconfig(Args,\n Args['clusterconfig_template'])\n os.makedirs(Args['tmpdir'] + \"/logs\", exist_ok=True)\n if not Args['local']:\n os.makedirs(Args['tmpdir'] + \"/cluster_logs\", exist_ok=True)\n Args['cluster_cmd'] = f\"--cluster-config {Args['tmpdir']}/snakemake_cluster.json --cluster '{Args['cluster_cmd']}'\"\n Args['cluster_cmd'] += f\" --resources cores={Args['max_resources']}\"\n Args['cluster_cmd'] += f\" {Args['snakemake_args']}\"\n print(f\"Cluster command: {Args['cluster_cmd']}\", file=sys.stderr)\n else:\n Args['cluster_cmd'] = ''\n\n # Extract list of species from MetaPhlAn database pickle\n if (not os.path.isfile(Args['tmpdir'] + \"/repgenomes_urls.txt\") or\n Args['force']):\n print(\"1. Extract all species from MetaPhlAn database and prepare URLS \"\n \"for download from NCBI.\", file=sys.stderr)\n if os.path.isdir(Args['databasedir']):\n db_versions = [re.search(r'mpa_v([0-9]+)_.+.pkl',\n os.path.basename(db)).group(1)\n for db in glob(f\"{Args['databasedir']}/*.pkl\")]\n db_versions.sort()\n if Args['metaphlanversion'] == 'latest':\n db_version = db_versions[-1]\n elif Args['metaphlanversion'].replace(\"v\", \"\") in db_versions:\n db_version = Args['metaphlanversion'].replace(\"v\", \"\")\n else:\n print(f\"The database version {Args['metaphlanversion']} is not \"\n f\"present in the database directory {Args['databasedir']}. \"\n \"The following database versions are currently available: \"\n f\"{', '.join(['v' + db for db in db_versions])}. Either \"\n \"pick from the available or download the database using \"\n \"MetaPhlAn.\",\n file=sys.stderr)\n sys.exit(1)\n species_cont = species.Species(db_version, Args['databasedir'])\n print(\"\\tExtract the strain information from the MetaPhlAn database\",\n file=sys.stderr)\n species_cont.extract_strains()\n else:\n print(f'The directory {Args[\"databasedir\"]} does not exist. 
Specify a '\n 'valid directory that contains the MetaPhlAn databases.',\n file=sys.stderr)\n sys.exit(1)\n\n # Download the overview of RefSeq genomes and join information with genomes\n print(\"\\tDownload the GCA assembly summary from NCBI\\n\",\n file=sys.stderr)\n subprocess.run(f'cd {Args[\"tmpdir\"]} && wget -N -nH '\n '--user-agent=Mozilla/5.0 --relative -r --no-parent '\n '--reject \"index.html*\" --cut-dirs=2 -e robots=off '\n f'{Args[\"genbankurl\"]}', shell=True)\n print(\"\\tJoin the GCA assembly summary information with the MetaPhlAn \"\n \"database information\", file=sys.stderr)\n species_cont.join_genbank(Args['tmpdir'] + \"/\" +\n os.path.basename(Args['genbankurl']))\n print(f\"\\tFetch information for missing genomes from NCBI Assembly directly.\",\n file=sys.stderr)\n species_cont.get_missing_information()\n print(\"\\tDetermine the representative genomes of \"\n f\"{species_cont.genomes.shape[0]} genomes present in the database\",\n file=sys.stderr)\n if Args['taxnames'] is None:\n taxnames = []\n else:\n if os.path.isfile(Args['taxnames']):\n taxnames = [line.rstrip()\n for line in open(Args['taxnames'], 'rt')]\n else:\n print(f\"The species list file {Args['taxnames']} does not exist. \"\n \"Specify the correct path to file.\", file=sys.stderr)\n sys.exit(1)\n species_cont.subset_taxa(taxnames)\n species_cont.determine_representative_genomes()\n print(f\"\\tIdentified {len(species_cont.representative_genomes)} genomes.\\n\"\n \"\\tPrepare URL list for download of genomes from NCBI.\", file=sys.stderr)\n species_cont.write_url_list(Args['tmpdir'] + \"/repgenomes_urls.txt\")\n print(\"Write table with information to genomes used in phylogenetic \"\n f\"analysis to {Args['tmpdir'] + '/genomes.tsv'}.\", file=sys.stderr)\n species_cont.genomes_set.loc[species_cont.genomes_set['GCAid']\n .isin(species_cont.representative_genomes)] \\\n .to_csv(Args['tmpdir'] + \"/genomes.tsv\", sep=\"\\t\", index=False)\n\n if Args['stop_at'] == 'download_representative_genomes':\n print(f\"Terminate at checkpoint {Args['stop_at']}.\", file=sys.stderr)\n sys.exit(0)\n elif (not os.path.isfile(Args['tmpdir'] + \"/done/download_representative_genomes\") or\n Args['force']):\n print(\"\\nDownload the representative genomes from NCBI\\n\", file=sys.stderr)\n subprocess.run(f\"snakemake -s {Args['snakemakedir']}/download_genomes.Snakefile \"\n f\"--configfile {Args['tmpdir']}/snakemake_config.json \"\n f\"{Args['cluster_cmd']} \"\n \"--restart-times 5 -k \"\n f\"-j {Args['nproc']}\", shell=True,\n stderr=open(Args['tmpdir'] +\n \"/logs/snakemake-download_representative_genomes.log\",\n \"at\"))\n if not os.path.isfile(Args['tmpdir'] +\n \"/done/download_representative_genomes\"):\n errormessages.print_errormessage(\"download_representative_genomes\",\n Args['tmpdir'])\n sys.exit(1)\n\n if Args['stop_at'] == 'install_marker_database':\n print(f\"Terminate at checkpoint {Args['stop_at']}.\", file=sys.stderr)\n sys.exit(0)\n elif (not os.path.isfile(Args['tmpdir'] + \"/done/install_marker_database\") or\n Args['force']):\n print(\"Prepare marker gene database of Segata et al. 
(2013) for tree \"\n \"building\", file=sys.stderr)\n database.write_superconfig_aa(Args['tmpdir'])\n phylophlan_config = database.load_check_config(Args['tmpdir'],\n Args['nproc'])\n database.install_marker_database(Args['tmpdir'], phylophlan_config)\n Path(Args['tmpdir'] + '/done/install_marker_database').touch(exist_ok=True)\n\n if Args['stop_at'] == 'fake_proteomes':\n print(f\"Terminate at checkpoint {Args['stop_at']}.\", file=sys.stderr)\n sys.exit(0)\n elif (not os.path.isfile(Args['tmpdir'] + \"/done/fake_proteomes\") or\n Args['force']):\n print(\"Uncompress FastA files downloaded from NCBI, align against the \"\n \"protein marker database of Segata et al. (2013) using DIAMOND \"\n \"blastx, identify and extract marker genes, and translate into \"\n \"amino acid sequences\", file=sys.stderr)\n subprocess.run(f\"snakemake -s {Args['snakemakedir']}/fake_proteomes.Snakefile \"\n f\"--configfile {Args['tmpdir']}/snakemake_config.json \"\n f\"{Args['cluster_cmd']} \"\n \"--restart-times 5 \"\n f\"-j {Args['nproc']}\", shell=True,\n stderr=open(Args['tmpdir'] +\n \"/logs/snakemake-fake_proteomes.log\", \"at\"))\n if not os.path.isfile(Args['tmpdir'] +\n \"/done/fake_proteomes\"):\n errormessages.print_errormessage(\"fake_proteomes\", Args['tmpdir'])\n sys.exit(1)\n\n if Args['stop_at'] == 'protein_markers':\n print(f\"Terminate at checkpoint {Args['stop_at']}.\", file=sys.stderr)\n sys.exit(0)\n elif (not os.path.isfile(Args['tmpdir'] + \"/done/protein_markers\") or\n Args['force']):\n print(\"Clean the fake proteomes, align against the protein marker \"\n \"database of Segata et al. (2013) using DIAMOND blastp, and \"\n \"identify and extract marker gene sequences.\", file=sys.stderr)\n subprocess.run(f\"snakemake -s {Args['snakemakedir']}/protein_markers.Snakefile \"\n f\"--configfile {Args['tmpdir']}/snakemake_config.json \"\n f\"{Args['cluster_cmd']} \"\n \"--restart-times 5 \"\n f\"-j {Args['nproc']}\", shell=True,\n stderr=open(Args['tmpdir'] +\n \"/logs/snakemake-protein_markers.log\", \"at\"))\n if not os.path.isfile(Args['tmpdir'] +\n \"/done/protein_markers\"):\n errormessages.print_errormessage(\"protein_markers\", Args['tmpdir'])\n sys.exit(1)\n\n if Args['stop_at'] == 'alignment':\n print(f\"Terminate at checkpoint {Args['stop_at']}.\", file=sys.stderr)\n sys.exit(0)\n elif (not os.path.isfile(Args['tmpdir'] + \"/done/alignment\") or\n Args['force']):\n print(\"Identify the marker genes that are present at least in four \"\n \"genomes and make alignments, trim non-variant sites and remove \"\n \"samples consisting of >= 90% gaps\", file=sys.stderr)\n subprocess.run(f\"snakemake -s {Args['snakemakedir']}/alignment.Snakefile \"\n f\"--configfile {Args['tmpdir']}/snakemake_config.json \"\n f\"{Args['cluster_cmd']} \"\n \"--restart-times 5 \"\n f\"-j {Args['nproc']}\", shell=True,\n stderr=open(Args['tmpdir'] +\n \"/logs/snakemake-alignment.log\", \"at\"))\n if not os.path.isfile(Args['tmpdir'] +\n \"/done/alignment\"):\n errormessages.print_errormessage(\"alignment\", Args['tmpdir'])\n sys.exit(1)\n\n if Args['stop_at'] == 'tree':\n print(f\"Terminate at checkpoint {Args['stop_at']}.\", file=sys.stderr)\n sys.exit(0)\n else:\n if (not os.path.isfile(Args['tmpdir'] + \"/done/tree\") or\n Args['force']):\n print(\"Concatenate all markers into one alignment, build tree using \"\n \"FastTree, and refining using RAxML\", file=sys.stderr)\n subprocess.run(f\"snakemake -s {Args['snakemakedir']}/tree.Snakefile \"\n f\"--configfile {Args['tmpdir']}/snakemake_config.json \"\n 
f\"{Args['cluster_cmd']} \"\n \"--restart-times 5 \"\n f\"-j {Args['nproc']}\", shell=True,\n stderr=open(Args['tmpdir'] +\n \"/logs/snakemake-tree.log\", \"at\"))\n if not os.path.isfile(Args['tmpdir'] + \"/done/tree\"):\n errormessages.print_errormessage(\"tree\", Args['tmpdir'])\n sys.exit(1)\n\n if (not os.path.isfile(Args['output']) or Args['force']):\n print(\"Annotate the tree with taxonomic information and write to output \"\n \"file.\", file=sys.stderr)\n tree_annot_df = pd.read_csv(Args['tmpdir'] + \"/genomes.tsv\",\n sep=\"\\t\")[['GCAid', 'label']] \\\n .set_index(['GCAid'])\n\n treefn = Args['tmpdir'] + \"/MetaPhlAn3tree.RAxML.tre\"\n tre = Tree(open(treefn, \"rt\").readline())\n for l in tre.iter_leaves():\n l.name = tree_annot_df.loc[l.name.replace(\".faa\", \"\"), 'label']\n tre.write(outfile=Args['output'])\n else:\n print(f\"The tree output file {Args['output']} already exists and the \"\n \"option '--force' has not been enabled to re-run it. Activate \"\n \"this option to re-run all steps or remove the output file prior \"\n \"to running the script.\", file=sys.stderr)\n sys.exit(1)",
"def __init__(self, reads1, reads2):\n print \"Start Analysis...\"\n self.alignment()\n self.sai_to_sam()\n self.sam_to_bam()\n #self.clean_files()",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n parser = E.OptionParser(\n version=\"%prog version: $Id: maq2assembly.py 2781 2009-09-10 11:33:14Z andreas $\")\n\n parser.add_option(\"-f\", \"--is-forward-coordinates\", dest=\"forward_coordinates\",\n help=\"translate to forward coordinates.\", action=\"store_true\")\n\n parser.add_option(\"-p\", \"--output-filename-pattern\", dest=\"output_filename_pattern\", type=\"string\",\n help=\"OUTPUT filename pattern for additional data [%default].\")\n\n parser.add_option(\"--method\", dest=\"methods\", type=\"choice\", action=\"append\",\n choices=(\"gff\", \"coverage\", \"region\", \"quality\"),\n help=\"methods to apply [%default].\")\n\n parser.set_defaults(\n output_format=\"%08i\",\n output_filename_pattern=\"%s\",\n methods=[],\n )\n\n (options, args) = E.Start(parser)\n\n ################################################\n ################################################\n ################################################\n # pick a processor\n ################################################\n methods = []\n\n if len(options.methods) == 0:\n raise \"please supply at least one method to apply.\"\n\n genome_fasta, queries_fasta = None, None\n\n for method in options.methods:\n if method == \"gff\":\n methods.append(BuilderGFF(genome_fasta, queries_fasta, options))\n elif method == \"coverage\":\n methods.append(\n BuilderCoverage(genome_fasta, queries_fasta, options))\n elif method == \"quality\":\n methods.append(\n BuilderQuality(genome_fasta, queries_fasta, options))\n elif method == \"region\":\n methods.append(BuilderRegion(genome_fasta, queries_fasta, options))\n\n for method in methods:\n method.printHeader()\n\n ninput, noutput = 0, 0\n id = 0\n for contig, start, end, reads, qualities in reader(options.stdin):\n\n ninput += 1\n id += 1\n for m in methods:\n m(id, contig, start, end, reads, qualities)\n\n noutput += 1\n\n options.stdlog.write(\"# ninput=%i, noutput=%i\\n\" % (ninput, noutput))\n\n E.Stop()",
"def _generic_aligner_commandline_file(cline, seqrecs, **kwargs):\n assert len(seqrecs) > 1, \"Need more than 1 sequence for alignment.\"\n # build alignment object 'unaligned'; pad seqrecs to be equal length\n unaligned = pad_seq_records_for_alignment(seqrecs)\n # execute alignment\n with tempfile.NamedTemporaryFile(delete=False, mode=\"w\") as tempf:\n AlignIO.write(unaligned, tempf, \"fasta\")\n tempf.flush()\n return cline(tempf, **kwargs)",
"def run_brisera_alignment(sc, refpath, qrypath, outpath):\n # Execute the alignments\n alignments, adelta = brisera.align_all(sc, refpath, qrypath)\n\n # Filter best alignments\n if settings.filter_align:\n alignments, fdelta = brisera.filter_alignments(sc, alignments)\n else:\n fdelta = 0\n\n # Write alignments to disk\n alignments.mapPartitions(write_records).saveAsTextFile(outpath)\n\n return alignments, adelta, fdelta",
"def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)",
"def initial_alignment(in_file, name, outdir, errors, config):\n align_out = os.path.join(outdir, \"%s-match.fastq\" % name)\n noalign_out = os.path.join(outdir, \"%s-nomatch.fastq\" % name)\n if not os.path.exists(align_out) or not os.path.exists(noalign_out):\n out_params = [\"--al\", align_out, \"--un\", noalign_out]\n out_params += [\"--solexa1.3-quals\"]\n run_bowtie(in_file, config[\"reference\"][0][\"file\"], None, errors,\n extra_params=out_params)\n return align_out, noalign_out",
"def main_dist(uid: str, **kwargs):\n # cfg = conf\n assert \"ds_to_use\" in kwargs\n ds_to_use = kwargs[\"ds_to_use\"]\n assert ds_to_use in [\"asrl_qa\", \"ch_qa\"]\n if ds_to_use == \"asrl_qa\":\n cfg = get_default_cfg()\n elif ds_to_use == \"ch_qa\":\n cfg = get_ch_cfg()\n else:\n raise NotImplementedError\n\n num_gpus = torch.cuda.device_count()\n cfg.num_gpus = num_gpus\n cfg.uid = uid\n cfg.cmd = sys.argv\n if num_gpus > 1:\n if \"local_rank\" in kwargs:\n # We are doing distributed parallel\n cfg.do_dist = True\n torch.distributed.init_process_group(backend=\"nccl\", init_method=\"env://\")\n torch.cuda.set_device(kwargs[\"local_rank\"])\n synchronize()\n else:\n # We are doing data parallel\n cfg.do_dist = False\n # cfg.do_dp = True\n # Update the config file depending on the command line args\n cfg = update_from_dict(cfg, kwargs, key_maps)\n cfg = post_proc_config(cfg)\n # Freeze the cfg, can no longer be changed\n cfg.freeze()\n # print(cfg)\n # Initialize learner\n learn = learner_init(uid, cfg)\n # Train or Test\n if not (cfg.only_val or cfg.only_test or cfg.overfit_batch):\n learn.fit(epochs=cfg.train.epochs, lr=cfg.train.lr)\n if cfg.run_final_val:\n print(\"Running Final Validation using best model\")\n learn.load_model_dict(resume_path=learn.model_file, load_opt=False)\n val_loss, val_acc, _ = learn.validate(\n db={\"valid\": learn.data.valid_dl}, write_to_file=True\n )\n print(val_loss)\n print(val_acc)\n else:\n pass\n else:\n if cfg.overfit_batch:\n learn.overfit_batch(cfg.train.epochs, 1e-4)\n if cfg.only_val:\n # learn.load_model_dict(resume_path=learn.model_file, load_opt=False)\n if cfg.train.resume_path != \"\":\n resume_path = cfg.train.resume_path\n else:\n resume_path = learn.model_file\n learn.load_model_dict(resume_path=resume_path)\n val_loss, val_acc, _ = learn.validate(\n db={\"valid\": learn.data.valid_dl}, write_to_file=True\n )\n print(val_loss)\n print(val_acc)\n # learn.testing(learn.data.valid_dl)\n pass\n if cfg.only_test:\n # learn.testing(learn.data.test_dl)\n learn.load_model_dict(resume_path=learn.model_file, load_opt=False)\n test_loss, test_acc, _ = learn.validate(db=learn.data.test_dl)\n print(test_loss)\n print(test_acc)\n\n return",
"def main() -> None:\n args = _get_arguments()\n\n file_level_logging = logging.DEBUG if args.log_to_file else None\n setup_logger(logging.INFO, file_level_logging)\n\n if not os.path.exists(args.smiles):\n mol = Molecule(smiles=args.smiles)\n if mol.rd_mol is None:\n logger().error(\n f\"The --smiles argument ({args.smiles})\"\n \" does not point to an existing file or is a valid RDKit SMILES.\"\n \" Cannot start retrosynthesis planning.\"\n )\n return\n\n if args.nproc:\n _multiprocess_smiles(args)\n return\n\n multi_smiles = os.path.exists(args.smiles)\n\n finder = AiZynthFinder(configfile=args.config)\n _select_stocks(finder, args)\n post_processing = _load_postprocessing_jobs(args.post_processing)\n finder.expansion_policy.select(args.policy or finder.expansion_policy.items[0])\n if args.filter:\n finder.filter_policy.select(args.filter)\n else:\n finder.filter_policy.select_all()\n\n params = [\n args.smiles,\n finder,\n args.output,\n args.cluster,\n args.route_distance_model,\n post_processing,\n args.checkpoint,\n ]\n if multi_smiles:\n _process_multi_smiles(*params)\n else:\n params = params[:-1]\n _process_single_smiles(*params)",
"def main(matrix,model,processors,algorithm):\n if algorithm == \"raxml-ng\":\n ab = subprocess.call(['which', 'raxml-ng'])\n if ab == 0:\n pass\n else:\n print(\"RAxML must be in your path as raxml-ng\")\n sys.exit()\n elif algorithm == \"raxml-HPC\":\n ab = subprocess.call(['which', 'raxmlHPC-PTHREADS-SSE3'])\n if ab == 0:\n pass\n else:\n print(\"RAxML must be in your path as raxmlHPC-PTHREADS-SSE3\")\n sys.exit()\n last=get_field_index(matrix)\n matrix_to_fasta(matrix, last)\n #Prep the creation of the FASTA file, removing odd characters\n os.system(\"sed 's/://g' all.fasta | sed 's/,//g' > out.fasta\")\n if model == \"ASC_GTRGAMMA\":\n subprocess.check_call(\"raxmlHPC-SSE3 -f d -p 12345 -m %s -s out.fasta -n nasp --asc-corr=lewis --no-bfgs > /dev/null 2>&1\" % model, stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"raxmlHPC-SSE3 -f e -m %s -s out.fasta -t RAxML_bestTree.nasp -n PARAMS --asc-corr=lewis --no-bfgs > /dev/null 2>&1\" % model, stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n else:\n if algorithm == \"raxml-HPC\":\n subprocess.check_call(\"raxmlHPC-PTHREADS-SSE3 -T %s -f d -p 12345 -m %s -s out.fasta -n nasp --no-bfgs > /dev/null 2>&1\" % (processors,model), stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"raxmlHPC-PTHREADS-SSE3 -T %s -f e -m %s -s out.fasta -t RAxML_bestTree.nasp -n PARAMS --no-bfgs > /dev/null 2>&1\" % (processors,model), stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n elif algorithm == \"raxml-ng\":\n subprocess.check_call(\"raxml-ng --msa out.fasta --model GTR+G --threads %s --prefix nasp\" % processors,stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n if algorithm == \"raxml-HPC\":\n subprocess.check_call(\"mv RAxML_bestTree.nasp nasp_raxml.tree\", shell=True)\n subprocess.check_call(\"mv RAxML_binaryModelParameters.PARAMS nasp.PARAMS\", shell=True)\n subprocess.check_call(\"rm RAxML_* out.fasta all.fasta\", shell=True)\n else:\n subprocess.check_call(\"mv nasp.raxml.bestTree nasp_raxml.tree\", stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n subprocess.check_call(\"rm nasp.raxml.startTree out.fasta all.fasta\", stdout=open(os.devnull, 'wb'),stderr=open(os.devnull, 'wb'),shell=True)\n print(\"Model used: %s\" % model)",
"def run(self) :\n# print \"evaluating with laban\"\n # currently, labanx reads from a preset file\n os.system('labanx '+str(self.rank)+\" \"+self.input+\" \"+self.output)",
"def main(argv=None):\n\n if argv is None:\n argv = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=__description__)\n\n # Positionals\n parser.add_argument(\"fasta_file\",help=\"fasta file to be turned into kmers\")\n\n # Options\n parser.add_argument(\"-o\",\"--outbase\",help=\"base name for output files\",action=\"store\",type=str,default=None)\n parser.add_argument(\"-k\",\"--kmersize\",help=\"kmer size\",action=\"store\",type=int,default=12)\n parser.add_argument(\"-s\",\"--seqperfile\",help=\"number of sequences per output file\",action=\"store\",\n type=int,default=50000)\n parser.add_argument(\"-n\",\"--numkmers\",\n help=\"Number of kmers to make, starting from most to least common. If -1, make all possible.\",\n type=int,default=1000000)\n\n args = parser.parse_args(argv)\n\n if args.outbase is None:\n out_base = args.fasta_file\n else:\n out_base = args.outbase\n\n parse_proteome(args.fasta_file,kmer_size=args.kmersize,out_base=out_base,\n seq_per_file=args.seqperfile,num_to_write=args.numkmers)",
"def run(args):\n # args = args_init(vars(get_args()), align=True)\n\n log.info('running RNAseq pipeline')\n\n ## default arguments, for RNAseq2 only\n args['align_to_rRNA'] = True\n\n ## multireads should not be discarded\n ## https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4728800/\n ## Genome Biology, 2016\n if args['include_multi_reads']:\n args['unique_only'] = False # \n else:\n args['unique_only'] = True # default \n\n # determine gtf file\n # gene gtf\n if args['gtf'] is None:\n args['gtf'] = Genome(**args).gene_gtf('refseq') # ucsc version\n\n # for GRCh38 genome, using GENCODE version \n # if args['genome'] == 'GRCh38':\n if args['genome'] in ['GRCh38', 'GRCm38']:\n args['gtf'] = Genome(**args).gene_gtf('ensembl') # gencode\n\n # te gtf\n if args['te_gtf'] is None:\n args['te_gtf'] = Genome(**args).te_gtf()\n\n # print(args['gtf'])\n\n ## update prefix\n ctl_prefix = str_common([os.path.basename(f) for f in args['c1']])\n ctl_prefix = ctl_prefix.rstrip('r|R|rep|Rep').rstrip('_|.')\n if args['C'] is None:\n args['C'] = ctl_prefix\n tre_prefix = str_common([os.path.basename(f) for f in args['t1']])\n tre_prefix = tre_prefix.rstrip('r|R|rep|Rep').rstrip('_|.')\n if args['T'] is None:\n args['T'] = tre_prefix\n \n ## run pipeline\n if args['extra_index']:\n extra_rnaseq(args, args['extra_gtf'])\n elif args['align_to_te']:\n te_rnaseq(args, args['te_gtf'])\n else:\n gene_rnaseq(args)\n\n log.info('finish')",
"def run_mash(self):\n if (self.path2 != \"\" and self.path3 != \"\"):\n ###Set up command###\n # <exe path> dist <options> <ref seq> <query seq>\n # Sketch size by default 10000, kmer = 16\n ####################\n\n ###Initialize variables\n ref_file = self.path2\n query_file = self.path3\n self.mash_out_path = os.getcwd() + \"/mash_out.txt\"\n path_to_exe = \"/Users/ddooley/bioinformatics_packages/Mash/mash\"\n cmd = \"\"\n flag_str = \"-s 10000 -k 16 -i\"\n cmd += path_to_exe + \" dist \" + \" \" + flag_str + \" \" + ref_file + \" \" + query_file + \" > \" + self.mash_out_path\n os.system(cmd)\n\n else:\n QtWidgets.QMessageBox.question(self, \"Error!\",\"You must select reference and query .fasta files.\",QtWidgets.QMessageBox.Ok)",
"def run_bwa(reference, readset):\n\tbwaindexcmd = \"bwa index \"+reference\n\tsubprocess.call(bwaindexcmd, shell=True)\n\talignedsam = reference+\".aligned.sam\"\n\tbwacmd = \"bwa mem -t 12 \"+reference+\" \"+readset\n\tbwacmd += \" | samtools view -SF 4 - > \"+alignedsam\n\tsubprocess.call(bwacmd, shell=True)\n\treturn alignedsam",
"def main():\n parser = argparse.ArgumentParser(\n description=\"Lite version of the CNVnator written in Python.\\nA tool for CNV discovery from depth of read mapping.\")\n parser.add_argument('-version', '--version', action='store_true', help='show version number and exit')\n parser.add_argument('-root', '--root', type=str, nargs=\"+\",\n help=\"CNVnator hd5 file: data storage for all calculations\", default=None)\n\n parser.add_argument('-download', '--download_resources', action='store_true', help='download resource files')\n\n parser.add_argument('-chrom', '--chrom', type=str, nargs=\"+\", help=\"list of chromosomes to apply calculation\",\n default=[])\n parser.add_argument('-v', '--verbose', type=str,\n choices=[\"none\", \"debug\", \"info\", \"warning\", \"error\", \"d\", \"e\", \"i\", \"w\"],\n help=\"verbose level: debug, info (default), warning, error\", default=\"info\")\n parser.add_argument('-log', '--log_file', type=str, help='log file')\n parser.add_argument('-j', '--max_cores', type=int,\n help=\"maximal number of cores to use in calculation\", default=8)\n parser.add_argument('-rd', '--rd', nargs=\"+\", type=str, help=\"read bam/sam/cram and store read depth information\")\n parser.add_argument('-T', '--reference_filename', type=str, help=\"reference fasta for CRAM\")\n\n parser.add_argument('-gc', '--gc', type=str, help=\"read fasta file and store GC/AT content\")\n parser.add_argument('-cgc', '--copy_gc', type=str, help=\"copy GC/AT content from another cnvnator file\")\n parser.add_argument('-his', '--his', type=binsize_type, nargs=\"+\",\n help=\"create histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-snp2his', '--his_from_snp', type=binsize_type, nargs=\"+\",\n help=\"create histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-stat', '--stat', type=binsize_type, nargs=\"+\",\n help=\"calculate statistics for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-partition', '--partition', type=binsize_type, nargs=\"+\",\n help=\"calculate segmentation for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-call', '--call', type=str, nargs=\"+\",\n help=\"CNV caller: [baf] bin_size [bin_size2 ...] 
(multiple bin sizes separate by space)\")\n parser.add_argument('-vcf', '-snp', '--vcf', nargs=\"+\", type=str, help=\"read SNP data from vcf files\")\n parser.add_argument('-somatic_snv', '--somatic_snv', nargs=\"+\", type=str, help=\"read SNP data from vcf files\")\n\n parser.add_argument('-minc', '--min_count', type=int,\n help=\"minimal count of haterozygous SNPs\", default=None)\n parser.add_argument('-vcf2rd', '--rd_from_vcf', type=str, help=\"read SNP data from vcf files\")\n parser.add_argument('-noAD', '--no_snp_counts', action='store_true',\n help=\"read positions of variants, not counts (AD tag)\")\n parser.add_argument('-nofilter', '--no_filter', action='store_true',\n help=\"read all variants (not only PASS)\")\n parser.add_argument('-ad', '--ad_tag', type=str, help=\"counts tag (default: AD)\", default=\"AD\")\n parser.add_argument('-gt', '--gt_tag', type=str, help=\"genotype tag (default: GT)\", default=\"GT\")\n parser.add_argument('-dp', '--dp_tag', type=str, help=\"read depth tag (default: DP)\", default=\"DP\")\n parser.add_argument('-callset', '--callset', type=str, help=\"name for somatic VCF signal\", default=None)\n parser.add_argument('-maxcn', '--max_copy_number', type=int, help=\"maximal copy number\", default=10)\n parser.add_argument('-mindbaf', '--baf_threshold', type=float, help=\"threshold for change in BAF level\",\n default=0.0)\n parser.add_argument('-bafres', '--baf_resolution', type=int, help=\"Resolution for unphased BAF likelihood\",\n default=200)\n parser.add_argument('-nolh', '--no_save_likelihood', action='store_true',\n help=\"do not save likelihood histograms (reduce size of pytor file)\")\n parser.add_argument('-oth', '--overlap_threshold', type=float, help=\"likelihood overlap threshold\",\n default=None)\n parser.add_argument('-mincf', '--min_cell_fraction', type=float, help=\"minimal cell fraction\", default=0.0)\n\n parser.add_argument('-pileup', '--pileup_bam', nargs=\"+\", type=str, help=\"calculate SNP counts from bam files\")\n parser.add_argument('-snp2rd', '--rd_from_snp', action='store_true', help=\"calculate RD from SNP counts\")\n parser.add_argument('-sbin', '--s_bin_size', type=binsize_type, help=\"Super bin size (use with -snp2rd)\",\n default=10000)\n\n parser.add_argument('-mask', '--mask', type=str, help=\"read fasta mask file and flag SNPs in P region\")\n parser.add_argument('-mask_snps', '--mask_snps', action='store_true', help=\"flag SNPs in P region\")\n parser.add_argument('-trio_phase', '--trio_phase', action='store_true', help=\"Phase trio\")\n parser.add_argument('-parents', '--phase_parents', action='store_true', help=\"Phase parents\")\n parser.add_argument('-mask_snvs', '--mask_snvs', type=str, help=\"flag SNVs in P region\")\n parser.add_argument('-idvar', '--idvar', type=str, help=\"read vcf file and flag SNPs that exist in database file\")\n parser.add_argument('-random_phase', '--random_phase', action='store_true', help=\"randomly phase SNPs\")\n parser.add_argument('-baf', '--baf', type=binsize_type, nargs=\"+\",\n help=\"create BAF histograms for specified bin size (multiple bin sizes separate by space)\")\n parser.add_argument('-nomask', '--no_mask', action='store_true', help=\"do not use P mask in BAF histograms\")\n parser.add_argument('-useid', '--use_id', action='store_true', help=\"use id flag filtering in SNP histograms\")\n parser.add_argument('-usehom', '--use_hom', action='store_true', help=\"use hom\")\n parser.add_argument('-usephase', '--use_phase', action='store_true',\n help=\"use information 
about phase while processing SNP data\")\n parser.add_argument('-reducenoise', '--reduce_noise', action='store_true',\n help=\"reduce noise in processing SNP data\")\n parser.add_argument('-blw', '--baf_likelihood_width', type=float,\n help=\"likelihood width used in processing SNP data (default=0.8)\", default=0.8)\n parser.add_argument('-altc', '--alt_corr', action='store_true',\n help=\"Remove alt/ref bias\")\n\n parser.add_argument('-plot', '--plot', type=str, nargs=\"+\", help=\"plotting\")\n parser.add_argument('-view', '--view', type=binsize_type,\n help=\"Enters interactive ploting mode\")\n parser.add_argument('-agg', '--force_agg', action='store_true', help=\"Force Agg matplotlib backend\")\n\n parser.add_argument('-panels', '--panels', type=str, nargs=\"+\", default=[\"rd\"], choices=[\"rd\", \"baf\", \"likelihood\"],\n help=\"plot panels (with -plot regions)\")\n\n parser.add_argument('-style', '--plot_style', type=str,\n help=\"available plot styles: \" + \", \".join(plt.style.available), choices=plt.style.available)\n parser.add_argument('-o', '--plot_output_file', type=str, help=\"output filename prefix and extension\", default=\"\")\n parser.add_argument('-anim', '--animation', type=str, help=\"animation folder/prefix\", default=\"\")\n\n parser.add_argument('-make_gc_file', '--make_gc_genome_file', action='store_true',\n help=\"used with -gc will create genome gc file\")\n parser.add_argument('-make_mask_file', '--make_mask_genome_file', action='store_true',\n help=\"used with -mask will create genome mask file\")\n parser.add_argument('-rd_use_mask', '--use_mask_with_rd', action='store_true', help=\"used P mask in RD histograms\")\n parser.add_argument('-nogc', '--no_gc_corr', action='store_true', help=\"do not use GC correction in RD histograms\")\n parser.add_argument('-rg', '--reference_genome', type=str, help=\"Manually set reference genome\", default=None)\n parser.add_argument('-sample', '--vcf_sample', type=str, help=\"Sample name in vcf file\", default=\"\")\n parser.add_argument('-conf', '--reference_genomes_conf', type=str, help=\"Configuration with reference genomes\",\n default=None)\n\n parser.add_argument('-ls', '--ls', action='store_true', help='list pytor file(s) content')\n parser.add_argument('-gc_info', '--gc_info', action='store_true', help='list pytor file(s) gc content stat')\n parser.add_argument('-rg_info', '--rg_info', action='store_true', help='list loaded reference gnomes')\n parser.add_argument('-info', '--info', type=binsize_type, nargs=\"*\", help='print statistics for pythor file(s)')\n parser.add_argument('-qc', '--qc', type=binsize_type, nargs=\"*\", help='print quality control statistics')\n parser.add_argument('-rdqc', '--rd_qc', type=binsize_type, nargs=\"*\",\n help='print quality control statistics without SNP data')\n parser.add_argument('-comp', '--compare', type=str, nargs=\"*\", help='compere two regions: -comp reg1 reg2 [n_bins]')\n parser.add_argument('-genotype', '--genotype', type=str, nargs=\"*\")\n parser.add_argument('-a', '--all', action='store_true', help='Genotype with all columns')\n parser.add_argument('-meta', '--metadata', action='store_true', help='list Metadata')\n parser.add_argument('-fasta2rg', '--reference_genome_template', type=str,\n help=\"create template for reference genome using chromosome lengths from fasta file\")\n parser.add_argument('-export', '--export', type=str, nargs=\"*\", help='Export to jbrowse and cnvnator')\n args = parser.parse_args(sys.argv[1:])\n\n log_format = '%(asctime)s - %(name)s - 
%(levelname)s - %(message)s'\n if args.verbose in {\"debug\", \"d\"}:\n level = logging.DEBUG\n elif args.verbose in {\"info\", \"i\"}:\n level = logging.INFO\n elif args.verbose in {\"warning\", \"w\"}:\n level = logging.WARNING\n elif args.verbose in {\"error\", \"e\"}:\n level = logging.ERROR\n else:\n level = logging.CRITICAL\n\n if args.log_file:\n logging.basicConfig(filename=args.log_file, level=logging.DEBUG, format=log_format)\n logger = logging.getLogger('cnvpytor')\n ch = logging.StreamHandler()\n formatter = logging.Formatter(log_format)\n ch.setFormatter(formatter)\n ch.setLevel(level)\n logger.addHandler(ch)\n else:\n logging.basicConfig(level=level, format=log_format)\n logger = logging.getLogger('cnvpytor')\n logger.debug(\"Start logging...\")\n\n if args.reference_genome_template is not None:\n Fasta(args.reference_genome_template).print_reference_genome_template()\n\n if args.download_resources:\n Genome.download_resources()\n return 0\n\n if not Genome.check_resources():\n logger.error(\"Some reference genome resource files are missing. \"\n \"Run 'cnvpytor -download' as same user who has installed cnvpytor.\")\n return 0\n\n if args.version:\n print('CNVpytor {}'.format(__version__))\n return 0\n\n if args.reference_genomes_conf:\n Genome.load_reference_genomes(args.reference_genomes_conf)\n elif os.path.exists(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py')):\n Genome.load_reference_genomes(os.path.expanduser('~/.cnvpytor/reference_genomes_conf.py'))\n\n if args.rg_info:\n Genome.print_reference_genomes()\n\n if args.root is not None:\n\n if args.ls:\n show = Show(args.root)\n show.ls()\n\n if args.gc_info:\n show = Show(args.root)\n show.gc_info()\n\n if args.export:\n if len(args.export) > 0:\n dir_name_list = args.export[1:]\n dir_name = ''\n if len(dir_name_list) > 0:\n dir_name = dir_name_list[0]\n export_program = args.export[0].lower()\n if export_program in ['jbrowse', 'cnvnator']:\n if export_program == 'jbrowse':\n export_j = ExportJBrowse(args.root, dir_name)\n export_j.create_reference_json()\n export_j.rd_signal()\n export_j.snp_signal()\n export_j.create_tracklist_json()\n elif export_program == 'cnvnator':\n logger.info(\"Under Development\")\n else:\n logger.error(\"Incorrect export program name\")\n\n if args.metadata:\n show = Show(args.root)\n show.meta()\n\n if args.info is not None:\n show = Show(args.root)\n show.info(args.info)\n\n\n if args.genotype is not None:\n params = {\"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.genotype_prompt(list(map(binsize_type, args.genotype)), all=args.all)\n\n if args.qc is not None:\n params = {\"bin_size\": binsize_type(args.qc[-1]),\n \"chrom\": args.chrom,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.qc()\n\n if args.rd_qc is not None:\n params = {\"bin_size\": binsize_type(args.rd_qc[-1]),\n \"chrom\": args.chrom,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.qc(snp_qc=False)\n\n\n if args.compare is not None:\n 
params = {\"bin_size\": binsize_type(args.compare[-1]),\n \"rd_use_gc_corr\": not args.no_gc_corr,\n \"rd_use_mask\": args.use_mask_with_rd\n }\n view = Viewer(args.root, params, force_agg=args.force_agg)\n if len(args.compare) == 3:\n view.compare(args.compare[0], args.compare[1])\n elif len(args.compare) == 4:\n view.compare(args.compare[0], args.compare[1], int(args.compare[2]))\n\n if args.rd:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.rd(args.rd, chroms=args.chrom, reference_filename=args.reference_filename)\n\n if args.reference_genome:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.set_reference_genome(args.reference_genome)\n\n if args.plot:\n params = {\"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n if args.plot_style:\n params[\"style\"] = args.plot_style\n view = Viewer(args.root, params)\n view.plot_command(args.plot)\n\n if args.view:\n params = {\"bin_size\": args.view,\n \"output_filename\": args.plot_output_file,\n \"chrom\": args.chrom,\n \"panels\": args.panels,\n \"snp_use_mask\": not args.no_mask,\n \"snp_use_id\": args.use_id,\n \"rd_use_mask\": args.use_mask_with_rd,\n \"rd_use_gc_corr\": not args.no_gc_corr\n }\n if args.plot_style:\n params[\"style\"] = args.plot_style\n view = Viewer(args.root, params, force_agg=args.force_agg)\n view.prompt()\n\n if args.gc:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.gc(args.gc, chroms=args.chrom, make_gc_genome_file=args.make_gc_genome_file)\n\n if args.copy_gc:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.copy_gc(args.copy_gc, chroms=args.chrom)\n\n if args.vcf:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.vcf(args.vcf, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,\n ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter)\n\n if args.idvar:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.variant_id(args.idvar, chroms=args.chrom)\n\n if args.somatic_snv:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n callset = \"default\" if args.callset is None else args.callset\n app.vcf(args.somatic_snv, chroms=args.chrom, sample=args.vcf_sample, no_counts=args.no_snp_counts,\n ad_tag=args.ad_tag, gt_tag=args.gt_tag, filter=not args.no_filter, callset=callset)\n\n if args.rd_from_vcf:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.rd_from_vcf(args.rd_from_vcf, chroms=args.chrom, sample=args.vcf_sample, ad_tag=args.ad_tag,\n dp_tag=args.dp_tag)\n\n if args.pileup_bam:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.pileup(args.pileup_bam, chroms=args.chrom, reference_filename=args.reference_filename)\n\n if args.rd_from_snp:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.rd_from_snp(chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,\n s_bin_size=args.s_bin_size)\n\n if args.mask:\n app = Root(args.root[0], create=True, max_cores=args.max_cores)\n app.mask(args.mask, chroms=args.chrom, make_mask_genome_file=args.make_mask_genome_file)\n\n if args.mask_snps:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.mask_snps()\n\n if args.mask_snvs:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.mask_snps(callset=args.mask_snvs)\n\n if args.random_phase:\n app = 
Root(args.root[0], max_cores=args.max_cores)\n app.random_phase()\n\n if args.trio_phase:\n app = Trio(args.root)\n app.trio_phase(parents=args.phase_parents)\n\n if args.stat:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.rd_stat(chroms=args.chrom)\n\n if args.his:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_histograms(args.his, chroms=args.chrom)\n\n if args.his_from_snp:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_histograms_from_snp_counts(args.his_from_snp, chroms=args.chrom, use_mask=not args.no_mask,\n use_id=args.use_id, callset=args.callset,\n min_count=args.min_count)\n if args.baf:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.calculate_baf(args.baf, chroms=args.chrom, use_mask=not args.no_mask, use_id=args.use_id,\n use_phase=args.use_phase, res=args.baf_resolution, reduce_noise=args.reduce_noise, blw=args.baf_likelihood_width,\n use_hom=args.use_hom, alt_ref_correct=args.alt_corr, save_likelihood=not args.no_save_likelihood)\n if args.partition:\n app = Root(args.root[0], max_cores=args.max_cores)\n app.partition(args.partition, chroms=args.chrom, use_gc_corr=not args.no_gc_corr,\n use_mask=args.use_mask_with_rd)\n\n if args.call:\n app = Root(args.root[0], max_cores=args.max_cores)\n if args.call[0] == \"baf\":\n if args.call[1] in [\"mosaic\", \"germline\"]:\n event_type = args.call[1]\n bins = list(map(binsize_type, args.call[2:]))\n else:\n event_type = \"both\"\n bins = list(map(binsize_type, args.call[1:]))\n if args.use_phase:\n app.call_baf_phased(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call_baf(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n #app.call_baf_old([binsize_type(x) for x in args.call[1:]], chroms=args.chrom, use_id=args.use_id,\n # use_mask=not args.no_mask, mcount=args.min_count, anim=args.animation)\n elif args.call[0] == \"mosaic\":\n app.call_mosaic(list(map(binsize_type, args.call[1:])), chroms=args.chrom,\n use_gc_corr=not args.no_gc_corr,\n use_mask=args.use_mask_with_rd, anim=args.animation)\n elif args.call[0] == \"subclones\":\n bins = list(map(binsize_type, args.call[1:]))\n app.call_subclones(bins, chroms=args.chrom, cnv_calls=\"calls combined\", print_calls=True,\n use_gc_corr=not args.no_gc_corr, rd_use_mask=args.use_mask_with_rd,\n snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold)\n elif args.call[0] == \"combined\":\n if args.call[1] in [\"mosaic\", \"germline\"]:\n event_type = args.call[1]\n bins = list(map(binsize_type, args.call[2:]))\n else:\n event_type = \"both\"\n bins = list(map(binsize_type, args.call[1:]))\n if args.use_phase:\n app.call_2d_phased(bins, chroms=args.chrom, event_type=event_type, 
print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call_2d(bins, chroms=args.chrom, event_type=event_type, print_calls=True,\n use_gc_corr=not args.no_gc_corr,\n rd_use_mask=args.use_mask_with_rd, snp_use_mask=not args.no_mask, snp_use_id=args.use_id,\n mcount=args.min_count, max_copy_number=args.max_copy_number,\n min_cell_fraction=args.min_cell_fraction, baf_threshold=args.baf_threshold,\n omin=args.overlap_threshold, use_hom=args.use_hom, anim=args.animation)\n else:\n app.call(list(map(binsize_type, args.call)), chroms=args.chrom, print_calls=True,\n use_gc_corr=not args.no_gc_corr, use_mask=args.use_mask_with_rd)",
"def main(args):\n master, result = pathlib.Path(args.master), pathlib.Path(args.result)\n\n # update Exam.autograder_format\n assert args.format in [\"otter\", \"ok\"], f\"Autograder format {args.format} invalid\"\n Exam.autograder_format = args.format\n\n # load notebook and parse\n nb = nbformat.read(master, as_version=NB_VERSION)\n parse_notebook(nb)\n\n # seed np.random in advance of creating student versions\n seed = args.seed or Exam.config.get(\"seed\", 42)\n np.random.seed(seed)\n\n # create autograder notebook\n nb_name = master.name\n create_and_write_autograder_exam(result / \"autograder\", nb_name)\n\n # create exams\n for i in range(Exam.config[\"num_students\"]):\n if (i + 1) % 50 == 0 and not args.quiet:\n print(f\"Generating exam {i + 1}\")\n output_dir = result / f\"exam_{i}\"\n create_and_write_exam_instance(output_dir, nb_name, Exam.config[\"num_questions\"])\n\n # all_tests_path = result / 'tests'\n # os.makedirs(all_tests_path, exist_ok=True)\n # write_all_version_tests(all_tests_path)\n\n # generate Gradescope zip file\n if Exam.config.get(\"generate\", {}):\n if not args.quiet:\n print(\"Generating autograder zip file...\")\n generate(args.result, Exam.config.get(\"generate\"))",
"def main():\n arg_parser = argparse.ArgumentParser(description=\"\"\"\n This utility will take a SAM alignment file from paired end reads \n and filter the original read FASTQ files do those reads without\n high-likelihood alignments to human.\n For gzipped alignments, consider using pipes: \n gunzip -c ref.fna.gz | strip_mt_ebv.py | gzip > ref.nomtebv.fna.gz\n \"\"\")\n\n arg_parser.add_argument(\n '--alnfile', '-A',\n type=argparse.FileType('r'),\n help='Alignment File. Can be stdin. For gzip, consider pipes',\n default=sys.stdin\n )\n arg_parser.add_argument(\n '--r1in', '-1',\n required=True,\n help='Input fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2in', '-2',\n required=True,\n help='Input fastq file for R2'\n )\n arg_parser.add_argument(\n '--r1out', '-o1',\n required=True,\n help='Output fastq file for R1'\n )\n arg_parser.add_argument(\n '--r2out', '-o2',\n required=True,\n help='Output fastq file for R2'\n )\n arg_parser.add_argument(\n '--mapq',\n default=30,\n type=int,\n help='Minimum mapq required to be considered a valid read'\n )\n arg_parser.add_argument(\n '--cov_min',\n type=float,\n default=0.9\n )\n\n args = arg_parser.parse_args()\n\n passed_ids = get_passing_ids(\n args.alnfile,\n args.mapq,\n args.cov_min,\n )\n\n filter_fastq(\n passed_ids,\n args.r1in,\n args.r2in,\n args.r1out,\n args.r2out\n )"
] | [
"0.6715519",
"0.6451304",
"0.6105899",
"0.60812104",
"0.60113335",
"0.5978272",
"0.5951038",
"0.589538",
"0.56262004",
"0.5610903",
"0.5578043",
"0.5525972",
"0.5519358",
"0.5513471",
"0.5505446",
"0.5498328",
"0.54912096",
"0.5486555",
"0.5481968",
"0.5479372",
"0.54618645",
"0.54523623",
"0.5425426",
"0.539427",
"0.53694236",
"0.53573036",
"0.5350083",
"0.5326016",
"0.53203505",
"0.529691"
] | 0.7081002 | 0 |
Perform a bit_resize operation with resize from front flags. | def test_bit_resize_from_front(self):
ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_FROM_FRONT)]
self.as_connection.operate(self.test_key, ops)
_, _, bins = self.as_connection.get(self.test_key)
# We should not have changed the zeroes bin
assert bins[self.test_bin_zeroes] == self.five_zero_blob
assert len(bins[self.test_bin_ones]) == 10
# We expect the newly added zeroes to be added to the front of the bytearray
assert bins[self.test_bin_ones] == bytearray([0] * 5 + [1] * 5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bit_resize_shrink_from_front(self):\n ops = [bitwise_operations.bit_resize(self.zero_one_bin, 5, resize_flags=aerospike.BIT_RESIZE_FROM_FRONT)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.zero_one_bin]) == 5\n assert bins[self.zero_one_bin] == bytearray([1] * 5)",
"def test_bit_resize_shrink_only_does_not_allow_grow(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_SHRINK_ONLY)]\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_grow_only_does_not_allow_shrink(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 1, resize_flags=aerospike.BIT_RESIZE_GROW_ONLY)]\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_shrink_removes_from_end(self):\n ops = [bitwise_operations.bit_resize(self.zero_one_bin, 5)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.zero_one_bin]) == 5\n assert bins[self.zero_one_bin] == bytearray([0] * 5)",
"def test_bit_resize_grow_only_allows_grow(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_GROW_ONLY)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.test_bin_ones]) == 10",
"def test_bit_resize_shrink_only_allows_shrink(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 1, resize_flags=aerospike.BIT_RESIZE_SHRINK_ONLY)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.test_bin_ones]) == 1",
"def test_bit_resize_defaults(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n # We should not have changed the zeroes bin\n assert bins[self.test_bin_zeroes] == self.five_zero_blob\n\n assert len(bins[self.test_bin_ones]) == 10\n # We expect the newly added zeroes to be added to the end of the bytearray\n assert bins[self.test_bin_ones] == bytearray([1] * 5 + [0] * 5)",
"def test_bit_resize_update_only_allows_update(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY}\n ops = [bitwise_operations.bit_resize(self.test_bin_zeroes, 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * 10)",
"def test_bit_resize_partial_no_fail_duplicate(self):\n bit_policy = {\n \"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY\n | aerospike.BIT_WRITE_NO_FAIL\n | aerospike.BIT_WRITE_PARTIAL\n }\n ops = [\n bitwise_operations.bit_resize(self.test_bin_zeroes, 15, policy=bit_policy),\n bitwise_operations.bit_resize(self.test_bin_zeroes, 20),\n ]\n self.as_connection.operate(self.test_key, ops)\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * 20)",
"def test_bit_resize_partial_no_fail(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY | aerospike.BIT_WRITE_NO_FAIL}\n ops = [bitwise_operations.bit_resize(\"new_binname\", 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n _, _, bins = self.as_connection.get(self.test_key)\n assert \"new_binname\" not in bins",
"def test_bit_resize_update_only_prevents_create(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY}\n ops = [bitwise_operations.bit_resize(\"new_binname\", 10, policy=bit_policy)]\n with pytest.raises(e.BinNotFound):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_create_only_prevents_update(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY}\n ops = [bitwise_operations.bit_resize(self.test_bin_zeroes, 10, policy=bit_policy)]\n with pytest.raises(e.BinExistsError):\n self.as_connection.operate(self.test_key, ops)",
"def _resize(self, cap):\n old = self._data\n self._data = [None] * cap\n walk = self._front\n for i in range(self._size):\n self._data[i] = old[walk]\n walk = (walk + 1) % len(old)\n self._front = 0",
"def resize(self, old, new):",
"def test_bit_resize_create_only_allows_create(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY}\n ops = [bitwise_operations.bit_resize(\"new_bin_name\", 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[\"new_bin_name\"] == bytearray([0] * 10)",
"def test_bit_resize_default_allows_create(self):\n ops = [bitwise_operations.bit_resize(\"new_bin_name\", 10)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[\"new_bin_name\"] == bytearray([0] * 10)",
"def resize(self, *args):\n return _ida_hexrays.hexwarns_t_resize(self, *args)",
"def _resize(self, new_cap):\n new_array = ba(new_cap)\n\n for i in range(self.count):\n new_array[i] = self.the_array[i]\n\n self.the_array = new_array\n self.capacity = new_cap",
"def _do_adaptive_shrinking(self, im):\n im_sz = list(im.shape)\n dim = len(im_sz)\n dim_to_pad = [dim_sz%self.adaptive_padding!=0 and dim_sz>3 for dim_sz in im_sz]\n dim_rem = [dim_sz//self.adaptive_padding for dim_sz in im_sz]\n new_dim_sz = [(dim_rem[i])*self.adaptive_padding if dim_to_pad[i] else im_sz[i] for i in range(dim)]\n before_id = [(-new_dim_sz[i] +im_sz[i]+1)//2 for i in range(dim)]\n after_id = [new_dim_sz[i] + before_id[i] for i in range(dim)]\n new_img = im[before_id[0]:after_id[0],before_id[1]:after_id[1],before_id[2]:after_id[2]].copy()\n return new_img",
"def reshape_masks(masks, \n resize_shape,\n ):\n _reshaped_masks = np.array([cv2.resize(_lr, tuple(resize_shape[-2:]), \n interpolation=cv2.INTER_NEAREST) for _lr in masks])\n return _reshaped_masks",
"def resize(self, size):\n if len(size) != len(self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"length of resize shape is incorrect.\")\n if not np.all(size >= self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"resize shape is too small.\")\n kernel = self._frequency_2_real()\n kernel_pad = self._zero_pad(kernel, size)\n self._Fkernel = self._real_2_frequency(kernel_pad)\n self.basis._axes_shape = kernel_pad.shape[1:-1]",
"def resize(self, *args):\n return _ida_hexrays.qvector_ccase_t_resize(self, *args)",
"def resize_mask_like(mask, x):\n mask_resize = resize(\n mask, to_shape=x.get_shape().as_list()[1:3],\n func=tf.image.resize_nearest_neighbor)\n return mask_resize",
"def _resize(self, cap): # nonpublic utitity\n B = self._make_array(cap) # new (bigger) array\n for k in range(self._size): # for each existing value\n B[k] = self._Array[k]\n self._Array = B # use the bigger array\n self._capacity = cap",
"def shrink_mask(self):\n m = self._mask\n if m.ndim and not m.any():\n self._mask = nomask\n return self",
"def resize_tensor(tensor, new_shape):\n channels = tensor.shape[0]\n new_tensor = np.zeros(shape=(channels,) + new_shape)\n for i in range(0, channels):\n new_tensor[i] = cv2.resize(tensor[i], dsize=new_shape[::-1])\n\n return new_tensor",
"def resize_128(img): \n return cv2.resize(img,(128,128))",
"def _shrink_secondary(self, amt):\n self._resize_secondary(-amt)",
"def shiftr_bitmask(self):\r\n self.__bitmask__ = self.__bitmask__ >> 1",
"def resize(self, *args):\n return _ida_hexrays.qvector_carg_t_resize(self, *args)"
] | [
"0.78656524",
"0.7009931",
"0.6986943",
"0.69517285",
"0.67249167",
"0.65363735",
"0.607698",
"0.60718304",
"0.6062587",
"0.59940326",
"0.5831852",
"0.5756714",
"0.5624054",
"0.5552826",
"0.551713",
"0.54809797",
"0.5480332",
"0.5291675",
"0.5257721",
"0.52422786",
"0.52366495",
"0.5222313",
"0.5214901",
"0.5202471",
"0.51002836",
"0.51002353",
"0.5025992",
"0.49971706",
"0.49783427",
"0.4976663"
] | 0.74251884 | 1 |
By default we can create a new bin with resize. | def test_bit_resize_default_allows_create(self):
ops = [bitwise_operations.bit_resize("new_bin_name", 10)]
self.as_connection.operate(self.test_key, ops)
_, _, bins = self.as_connection.get(self.test_key)
assert bins["new_bin_name"] == bytearray([0] * 10) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bit_resize_create_only_allows_create(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY}\n ops = [bitwise_operations.bit_resize(\"new_bin_name\", 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[\"new_bin_name\"] == bytearray([0] * 10)",
"def test_bit_resize_grow_only_allows_grow(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_GROW_ONLY)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.test_bin_ones]) == 10",
"def test_bit_resize_defaults(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n # We should not have changed the zeroes bin\n assert bins[self.test_bin_zeroes] == self.five_zero_blob\n\n assert len(bins[self.test_bin_ones]) == 10\n # We expect the newly added zeroes to be added to the end of the bytearray\n assert bins[self.test_bin_ones] == bytearray([1] * 5 + [0] * 5)",
"def test_bit_resize_shrink_removes_from_end(self):\n ops = [bitwise_operations.bit_resize(self.zero_one_bin, 5)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.zero_one_bin]) == 5\n assert bins[self.zero_one_bin] == bytearray([0] * 5)",
"def test_bit_resize_shrink_only_allows_shrink(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 1, resize_flags=aerospike.BIT_RESIZE_SHRINK_ONLY)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.test_bin_ones]) == 1",
"def resize(self, old, new):",
"def test_bit_resize_grow_only_does_not_allow_shrink(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 1, resize_flags=aerospike.BIT_RESIZE_GROW_ONLY)]\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_shrink_only_does_not_allow_grow(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_SHRINK_ONLY)]\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def resize(self):\n pass",
"def test_bit_resize_partial_no_fail(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY | aerospike.BIT_WRITE_NO_FAIL}\n ops = [bitwise_operations.bit_resize(\"new_binname\", 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n _, _, bins = self.as_connection.get(self.test_key)\n assert \"new_binname\" not in bins",
"def test_bit_resize_create_only_prevents_update(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY}\n ops = [bitwise_operations.bit_resize(self.test_bin_zeroes, 10, policy=bit_policy)]\n with pytest.raises(e.BinExistsError):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_shrink_from_front(self):\n ops = [bitwise_operations.bit_resize(self.zero_one_bin, 5, resize_flags=aerospike.BIT_RESIZE_FROM_FRONT)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.zero_one_bin]) == 5\n assert bins[self.zero_one_bin] == bytearray([1] * 5)",
"def test_bit_resize_update_only_prevents_create(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY}\n ops = [bitwise_operations.bit_resize(\"new_binname\", 10, policy=bit_policy)]\n with pytest.raises(e.BinNotFound):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_partial_no_fail_duplicate(self):\n bit_policy = {\n \"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY\n | aerospike.BIT_WRITE_NO_FAIL\n | aerospike.BIT_WRITE_PARTIAL\n }\n ops = [\n bitwise_operations.bit_resize(self.test_bin_zeroes, 15, policy=bit_policy),\n bitwise_operations.bit_resize(self.test_bin_zeroes, 20),\n ]\n self.as_connection.operate(self.test_key, ops)\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * 20)",
"def augmentBin(bin, size, binLabel, data_path):\n # copy ratings of the original images to the new ratings file\n newRatings = open(new_ratings_file_path, 'a')\n for imagePath, rating in bin:\n newRatings.write(getRatingsLine(imagePath, rating))\n newRatings.close()\n # determine number of left images and generate them\n augmentationFactor = np.ceil(float(size) / len(bin))\n print(\"augmenting bin [\" + str(binLabel) + \"] (size: \" + str(len(bin)) + \", augmentationFactor: \" + str(\n augmentationFactor) + \")\")\n if augmentationFactor <= 1:\n return\n leftImages = size - len(bin)\n augmentedBin = []\n for imagePath, rating in bin:\n # determine how many images should be generated\n num_to_generate = augmentationFactor - 1\n actual_to_generate = num_to_generate if num_to_generate <= leftImages else leftImages\n num_generated = augmentImageByRotation(imagePath, actual_to_generate, binLabel, data_path)\n leftImages -= num_generated\n # break if no more images needed\n if leftImages <= 0:\n break",
"def test_bit_resize_from_front(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_FROM_FRONT)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n # We should not have changed the zeroes bin\n assert bins[self.test_bin_zeroes] == self.five_zero_blob\n\n assert len(bins[self.test_bin_ones]) == 10\n # We expect the newly added zeroes to be added to the front of the bytearray\n assert bins[self.test_bin_ones] == bytearray([0] * 5 + [1] * 5)",
"def bin_binarise(self):\n pass",
"def test_bit_resize_update_only_allows_update(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY}\n ops = [bitwise_operations.bit_resize(self.test_bin_zeroes, 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * 10)",
"def createBin(image_object, num=8):\n image_array = sitk.GetArrayFromImage(image_object)\n _, bin_edges = np.histogram(image_array.flatten(), bins=num)\n bin_edges[-1] += 1\n for i in range(num):\n image_array[(image_array >= bin_edges[i]) & (image_array < bin_edges[i+1])] = i+1\n image_object_bin = sitk.GetImageFromArray(image_array)\n return image_object_bin",
"def reduceBin(bin, size, binLabel):\n print(\"reducing bin [\" + str(binLabel) + \"] (size: \" + str(len(bin)) + \")\")\n np.random.shuffle(bin)\n chosenImages = bin[:size]\n newRatings = open(new_ratings_file_path, 'a')\n for image in chosenImages:\n newRatings.write(getRatingsLine(image[0], image[1]))\n newRatings.close()",
"def resize(self, *args):\n return _ida_hexrays.hexwarns_t_resize(self, *args)",
"def rebin(self, *args, **kwargs):\n return _image.image_rebin(self, *args, **kwargs)",
"def bdev_rbd_resize(client, name, new_size):\n params = {\n 'name': name,\n 'new_size': new_size,\n }\n return client.call('bdev_rbd_resize', params)",
"def _shrink(self):\n raise NotImplementedError(\"Should have implemented this.\")",
"def _resize(self, new_cap):\n new_array = ba(new_cap)\n\n for i in range(self.count):\n new_array[i] = self.the_array[i]\n\n self.the_array = new_array\n self.capacity = new_cap",
"def _assign_sizes(self):",
"def body_resize(self):",
"def rebin_plot(histogram, bins_array):\n newname = histogram.GetName()+'_rebinned'\n newplot = histogram.Rebin(len(bins_array)-1, newname, bins_array)\n newplot.SetDirectory(0)\n\n #print \"found overflow for\", newname, \"of\", overflow\n #newplot.SetBinContent(newplot.GetNbinsX(),newplot.GetBinContent(newplot.GetNbinsX())+newplot.GetBinContent(newplot. GetNbinsX()+1))\n #newplot.SetBinError(newplot.GetNbinsX(),math.sqrt(newplot.GetBinError(newplot.GetNbinsX())**2 + newplot. GetBinError(newplot.GetNbinsX()+1)**2 ) )\n #newplot.SetBinContent(newplot.GetNbinsX()+1,0) # Set overflow to 0\n\n return newplot",
"def resize(self, width: int, height: int):\n pass",
"def handleResize(self):\n pass"
] | [
"0.7059136",
"0.6962264",
"0.6600574",
"0.6584359",
"0.6526519",
"0.6474382",
"0.6400263",
"0.63203436",
"0.6264439",
"0.62524",
"0.6250521",
"0.6248925",
"0.62143785",
"0.5967999",
"0.5863293",
"0.57773244",
"0.5767365",
"0.5744283",
"0.5743729",
"0.5714881",
"0.56769586",
"0.55928844",
"0.55700916",
"0.55105335",
"0.5452435",
"0.5446233",
"0.544016",
"0.54275805",
"0.54263824",
"0.5418767"
] | 0.74629015 | 0 |
Create a bin with resize using the create only flag. | def test_bit_resize_create_only_allows_create(self):
bit_policy = {"bit_write_flags": aerospike.BIT_WRITE_CREATE_ONLY}
ops = [bitwise_operations.bit_resize("new_bin_name", 10, policy=bit_policy)]
self.as_connection.operate(self.test_key, ops)
_, _, bins = self.as_connection.get(self.test_key)
assert bins["new_bin_name"] == bytearray([0] * 10) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_bit_resize_create_only_prevents_update(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY}\n ops = [bitwise_operations.bit_resize(self.test_bin_zeroes, 10, policy=bit_policy)]\n with pytest.raises(e.BinExistsError):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_default_allows_create(self):\n ops = [bitwise_operations.bit_resize(\"new_bin_name\", 10)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[\"new_bin_name\"] == bytearray([0] * 10)",
"def test_bit_resize_update_only_prevents_create(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY}\n ops = [bitwise_operations.bit_resize(\"new_binname\", 10, policy=bit_policy)]\n with pytest.raises(e.BinNotFound):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_grow_only_does_not_allow_shrink(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 1, resize_flags=aerospike.BIT_RESIZE_GROW_ONLY)]\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_grow_only_allows_grow(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_GROW_ONLY)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.test_bin_ones]) == 10",
"def test_bit_resize_shrink_only_does_not_allow_grow(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_SHRINK_ONLY)]\n with pytest.raises(e.InvalidRequest):\n self.as_connection.operate(self.test_key, ops)",
"def test_bit_resize_shrink_only_allows_shrink(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 1, resize_flags=aerospike.BIT_RESIZE_SHRINK_ONLY)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.test_bin_ones]) == 1",
"def test_bit_resize_partial_no_fail(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY | aerospike.BIT_WRITE_NO_FAIL}\n ops = [bitwise_operations.bit_resize(\"new_binname\", 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n _, _, bins = self.as_connection.get(self.test_key)\n assert \"new_binname\" not in bins",
"def test_bit_resize_shrink_removes_from_end(self):\n ops = [bitwise_operations.bit_resize(self.zero_one_bin, 5)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.zero_one_bin]) == 5\n assert bins[self.zero_one_bin] == bytearray([0] * 5)",
"def createBin(image_object, num=8):\n image_array = sitk.GetArrayFromImage(image_object)\n _, bin_edges = np.histogram(image_array.flatten(), bins=num)\n bin_edges[-1] += 1\n for i in range(num):\n image_array[(image_array >= bin_edges[i]) & (image_array < bin_edges[i+1])] = i+1\n image_object_bin = sitk.GetImageFromArray(image_array)\n return image_object_bin",
"def test_bit_resize_partial_no_fail_duplicate(self):\n bit_policy = {\n \"bit_write_flags\": aerospike.BIT_WRITE_CREATE_ONLY\n | aerospike.BIT_WRITE_NO_FAIL\n | aerospike.BIT_WRITE_PARTIAL\n }\n ops = [\n bitwise_operations.bit_resize(self.test_bin_zeroes, 15, policy=bit_policy),\n bitwise_operations.bit_resize(self.test_bin_zeroes, 20),\n ]\n self.as_connection.operate(self.test_key, ops)\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * 20)",
"def test_bit_resize_shrink_from_front(self):\n ops = [bitwise_operations.bit_resize(self.zero_one_bin, 5, resize_flags=aerospike.BIT_RESIZE_FROM_FRONT)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n\n assert len(bins[self.zero_one_bin]) == 5\n assert bins[self.zero_one_bin] == bytearray([1] * 5)",
"def test_bit_resize_update_only_allows_update(self):\n bit_policy = {\"bit_write_flags\": aerospike.BIT_WRITE_UPDATE_ONLY}\n ops = [bitwise_operations.bit_resize(self.test_bin_zeroes, 10, policy=bit_policy)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n assert bins[self.test_bin_zeroes] == bytearray([0] * 10)",
"def test_bit_resize_defaults(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n # We should not have changed the zeroes bin\n assert bins[self.test_bin_zeroes] == self.five_zero_blob\n\n assert len(bins[self.test_bin_ones]) == 10\n # We expect the newly added zeroes to be added to the end of the bytearray\n assert bins[self.test_bin_ones] == bytearray([1] * 5 + [0] * 5)",
"def create_binary(self, number):\n if not (0 <= number <= 65535 and type(number) == int):\n raise ValueError ('Binary input number must be an integer between 0 and 65535, not: {}'.format(number))\n \n pattern = np.flipud(np.array([int(i) for i in'{0:016b}'.format(number)]).reshape(4,4))\n return pattern",
"def bin_binarise(self):\n pass",
"def bdev_compress_create(client, base_bdev_name, pm_path, lb_size):\n params = {'base_bdev_name': base_bdev_name, 'pm_path': pm_path}\n\n if lb_size:\n params['lb_size'] = lb_size\n\n return client.call('bdev_compress_create', params)",
"def GrayCodePattern_create(width, height):\n pass",
"def createBitmap(self):\n return self.level.has_redundancy and self.size >= 1000 and self.format.type != \"swap\"",
"def test_bit_resize_from_front(self):\n ops = [bitwise_operations.bit_resize(self.test_bin_ones, 10, resize_flags=aerospike.BIT_RESIZE_FROM_FRONT)]\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n # We should not have changed the zeroes bin\n assert bins[self.test_bin_zeroes] == self.five_zero_blob\n\n assert len(bins[self.test_bin_ones]) == 10\n # We expect the newly added zeroes to be added to the front of the bytearray\n assert bins[self.test_bin_ones] == bytearray([0] * 5 + [1] * 5)",
"def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)",
"def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)",
"def rebin(self, bin_shape, operation=np.mean, operation_ignores_mask=False, handle_mask=np.all,\n propagate_uncertainties=False, new_unit=None, **kwargs):\n # Sanitize input.\n new_unit = new_unit or self.unit\n # Make sure the input bin dimensions are integers.\n bin_shape = np.rint(bin_shape).astype(int)\n offsets = (bin_shape - 1) / 2\n if all(bin_shape == 1):\n return self\n # Ensure bin_size has right number of entries and each entry is an\n # integer fraction of the array shape in each dimension.\n data_shape = self.dimensions.value.astype(int)\n naxes = len(data_shape)\n if len(bin_shape) != naxes:\n raise ValueError(\"bin_shape must have an entry for each array axis.\")\n if (np.mod(data_shape, bin_shape) != 0).any():\n raise ValueError(\n \"bin shape must be an integer fraction of the data shape in each dimension. \"\n f\"data shape: {data_shape}; bin shape: {bin_shape}\")\n\n # Reshape array so odd dimensions represent pixels to be binned\n # then apply function over those axes.\n m = None if (self.mask is None or self.mask is False or operation_ignores_mask) else self.mask\n data = self.data\n if m is not None:\n for array_type, masked_type in ARRAY_MASK_MAP.items():\n if isinstance(self.data, array_type):\n break\n else:\n masked_type = np.ma.masked_array\n warn.warning(\"data and mask arrays of different or unrecognized types. \"\n \"Casting them into a numpy masked array.\")\n data = masked_type(self.data, m)\n\n reshape = np.empty(data_shape.size + bin_shape.size, dtype=int)\n new_shape = (data_shape / bin_shape).astype(int)\n reshape[0::2] = new_shape\n reshape[1::2] = bin_shape\n reshape = tuple(reshape)\n reshaped_data = data.reshape(reshape)\n operation_axes = tuple(range(len(reshape) - 1, 0, -2))\n new_data = operation(reshaped_data, axis=operation_axes)\n if isinstance(new_data, ARRAY_MASK_MAP[np.ndarray]):\n new_data = new_data.data\n if handle_mask is None:\n new_mask = None\n elif isinstance(self.mask, (type(None), bool)): # Preserve original mask type.\n new_mask = self.mask\n else:\n reshaped_mask = self.mask.reshape(reshape)\n new_mask = handle_mask(reshaped_mask, axis=operation_axes)\n\n # Propagate uncertainties if propagate_uncertainties kwarg set.\n new_uncertainty = None\n if propagate_uncertainties:\n if self.uncertainty is None:\n warnings.warn(\"Uncertainties cannot be propagated as there are no uncertainties, \"\n \"i.e. self.uncertainty is None.\")\n elif isinstance(self.uncertainty, astropy.nddata.UnknownUncertainty):\n warnings.warn(\"self.uncertainty is of type UnknownUncertainty which does not \"\n \"support uncertainty propagation.\")\n elif (not operation_ignores_mask\n and (self.mask is True or (self.mask is not None\n and not isinstance(self.mask, bool)\n and self.mask.all()))):\n warnings.warn(\"Uncertainties cannot be propagated as all values are masked and \"\n \"operation_ignores_mask is False.\")\n else:\n if propagate_uncertainties is True:\n propagate_uncertainties = utils.cube.propagate_rebin_uncertainties\n # If propagate_uncertainties, use astropy's infrastructure.\n # For this the data and uncertainty must be reshaped\n # so the first dimension represents the flattened size of a single bin\n # while the rest represent the shape of the new data. 
Then the elements\n # in each bin can be iterated (all bins being treated in parallel) and\n # their uncertainties propagated.\n bin_size = bin_shape.prod()\n flat_shape = [bin_size] + list(new_shape)\n dummy_axes = tuple(range(1, len(reshape), 2))\n flat_data = np.moveaxis(reshaped_data, dummy_axes, tuple(range(naxes)))\n flat_data = flat_data.reshape(flat_shape)\n reshaped_uncertainty = self.uncertainty.array.reshape(tuple(reshape))\n flat_uncertainty = np.moveaxis(reshaped_uncertainty, dummy_axes, tuple(range(naxes)))\n flat_uncertainty = flat_uncertainty.reshape(flat_shape)\n flat_uncertainty = type(self.uncertainty)(flat_uncertainty)\n if m is not None:\n reshaped_mask = self.mask.reshape(tuple(reshape))\n flat_mask = np.moveaxis(reshaped_mask, dummy_axes, tuple(range(naxes)))\n flat_mask = flat_mask.reshape(flat_shape)\n else:\n flat_mask = None\n # Propagate uncertainties.\n new_uncertainty = propagate_uncertainties(\n flat_uncertainty, flat_data, flat_mask,\n operation=operation, operation_ignores_mask=operation_ignores_mask,\n handle_mask=handle_mask, new_unit=new_unit, **kwargs)\n\n # Resample WCS\n new_wcs = ResampledLowLevelWCS(self.wcs.low_level_wcs, bin_shape[::-1])\n\n # Reform NDCube.\n new_cube = type(self)(new_data, new_wcs, uncertainty=new_uncertainty, mask=new_mask,\n meta=self.meta, unit=new_unit)\n new_cube._global_coords = self._global_coords\n # Reconstitute extra coords\n if not self.extra_coords.is_empty:\n new_array_grids = [None if bin_shape[i] == 1 else\n np.arange(offsets[i], data_shape[i] + offsets[i], bin_shape[i])\n for i in range(naxes)]\n new_cube._extra_coords = self.extra_coords.resample(bin_shape, ndcube=new_cube)\n\n return new_cube",
"def createBinObjects(n):\n bins = []\n for i in range(n):\n \tbins.append(Bin())\n return bins",
"def rebin(self, *args, **kwargs):\n return _image.image_rebin(self, *args, **kwargs)",
"def augmentBin(bin, size, binLabel, data_path):\n # copy ratings of the original images to the new ratings file\n newRatings = open(new_ratings_file_path, 'a')\n for imagePath, rating in bin:\n newRatings.write(getRatingsLine(imagePath, rating))\n newRatings.close()\n # determine number of left images and generate them\n augmentationFactor = np.ceil(float(size) / len(bin))\n print(\"augmenting bin [\" + str(binLabel) + \"] (size: \" + str(len(bin)) + \", augmentationFactor: \" + str(\n augmentationFactor) + \")\")\n if augmentationFactor <= 1:\n return\n leftImages = size - len(bin)\n augmentedBin = []\n for imagePath, rating in bin:\n # determine how many images should be generated\n num_to_generate = augmentationFactor - 1\n actual_to_generate = num_to_generate if num_to_generate <= leftImages else leftImages\n num_generated = augmentImageByRotation(imagePath, actual_to_generate, binLabel, data_path)\n leftImages -= num_generated\n # break if no more images needed\n if leftImages <= 0:\n break",
"def create_all_mask(mask, num, stride):\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)",
"def reduceBin(bin, size, binLabel):\n print(\"reducing bin [\" + str(binLabel) + \"] (size: \" + str(len(bin)) + \")\")\n np.random.shuffle(bin)\n chosenImages = bin[:size]\n newRatings = open(new_ratings_file_path, 'a')\n for image in chosenImages:\n newRatings.write(getRatingsLine(image[0], image[1]))\n newRatings.close()",
"def _binary_app(self):\n self.make_binary()",
"def create_empty_image(width=512, height=512):\n blank_img = np.zeros((width, height, 3), np.uint8)\n # Return instance of the class\n return ExtendedImage(blank_img)"
] | [
"0.71709293",
"0.70837414",
"0.6715232",
"0.6555181",
"0.65351737",
"0.6414123",
"0.6213422",
"0.5822711",
"0.57785696",
"0.5771307",
"0.5741651",
"0.5709636",
"0.55918306",
"0.55503404",
"0.54320246",
"0.52427995",
"0.52391505",
"0.523907",
"0.5202887",
"0.519918",
"0.51528597",
"0.5148046",
"0.51359403",
"0.5086366",
"0.50835794",
"0.5065768",
"0.50185776",
"0.4957881",
"0.4929503",
"0.49116644"
] | 0.7797645 | 0 |